diff --git a/FeatureRequest.md b/FeatureRequest.md index 75a724d9..5521641e 100644 --- a/FeatureRequest.md +++ b/FeatureRequest.md @@ -11,6 +11,28 @@ ____ +#### #6 Check for double ROI names + +Check during configuration that ROI names are unique. + +To do: + +* Implementation of ROI name checking in the html code before saving analog or digital ROIs + + + +#### #5 Configurable decimal separator (point or comma) + +Make the decimal separator configurable for different systems + +To do: + +* Implementation of the decimal separator in the postprocessing module +* Extension of the configuration +* Adaptation of the html configuration to implement shifting + + + #### #4 Initial Shifting and Rotation * https://github.com/jomjol/AI-on-the-edge-device/issues/123 diff --git a/README.md b/README.md index 8be6df4c..5b6e07ba 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,20 @@ If you would like to support the developer with a cup of coffee you can do that **General remark:** Beside the `firmware.bin`, typically also the content of `/html` needs to be updated! +##### Rolling - (2021-04-20) +* Rollback to espressif 2.1.0, as 3.2.0 shows unstable reboots +* Upgrade digital CNN to v8.3.0 (added new type of digits) +* Bugfix: + * WLAN passwords containing '=' are now possible + * Changing the hostname via config.ini failed under certain circumstances + +2021-04-05 + +* Removal of unneeded web server components (direct access to camera, menu point "check") +* Update to espressif 3.2.0 (= esp-idf 4.2) +* Internal update: TFLite (v2.5), esp32cam, startup sequence +* Based on v6.6.1 ##### 6.6.1 Image Processing in Memory - (2021-04-05) diff --git a/code/components/connect_wlan/connect_wlan._cpp_ b/code/components/connect_wlan/connect_wlan._cpp_ deleted file mode 100644 index eab7a500..00000000 --- a/code/components/connect_wlan/connect_wlan._cpp_ +++ /dev/null @@ -1,539 +0,0 @@ -#include "connect_wlan.h" - -#include -#include "freertos/FreeRTOS.h" -#include "freertos/task.h" -#include "freertos/event_groups.h" -#include "esp_wifi.h" -#include "esp_log.h" - -#include -#include -#include - -#include "Helper.h" - -static const char *TAG = "connect_wlan"; - -std::string ssid = ""; -std::string passphrase = ""; -std::string hostname = ""; -std::string ipaddress = ""; -std::string gw = ""; -std::string netmask = ""; -std::string dns = ""; - -std::string std_hostname = "watermeter"; - -#define BLINK_GPIO GPIO_NUM_33 - -static EventGroupHandle_t s_wifi_event_group; - -#define WIFI_CONNECTED_BIT BIT0 -#define WIFI_FAIL_BIT BIT1 -static int s_retry_num = 0; - - -std::vector ZerlegeZeile(std::string input, std::string _delimiter = "") -{ - std::vector Output; - std::string delimiter = " =,"; - if (_delimiter.length() > 0){ - delimiter = _delimiter; - } - - input = trim(input, delimiter); - size_t pos = findDelimiterPos(input, delimiter); - std::string token; - while (pos != std::string::npos) { - token = input.substr(0, pos); - token = trim(token, delimiter); - Output.push_back(token); - input.erase(0, pos + 1); - input = trim(input, delimiter); - pos = findDelimiterPos(input, delimiter); - } - Output.push_back(input); - - return Output; -} - - -void blinkstatus(int dauer, int _anzahl) -{ - gpio_reset_pin(BLINK_GPIO); - gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT); - for (int i = 0; i < _anzahl; ++i) - { - gpio_set_level(BLINK_GPIO, 0); - vTaskDelay(dauer / portTICK_PERIOD_MS); - gpio_set_level(BLINK_GPIO, 1); - vTaskDelay(dauer / portTICK_PERIOD_MS); - } -} - - - - -void strinttoip4(std::string ip, int &a, int &b, int &c, int &d) { -
std::stringstream s(ip); - char ch; //to temporarily store the '.' - s >> a >> ch >> b >> ch >> c >> ch >> d; -} - - - - -static void event_handler_neu(void* arg, esp_event_base_t event_base, - int32_t event_id, void* event_data) -{ - if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_START) { - blinkstatus(200, 1); - esp_wifi_connect(); - } else if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_DISCONNECTED) { - blinkstatus(200, 5); - esp_wifi_connect(); - s_retry_num++; - ESP_LOGI(TAG, "retry to connect to the AP"); - } else if (event_base == IP_EVENT && event_id == IP_EVENT_STA_GOT_IP) { - blinkstatus(1000, 3); - ip_event_got_ip_t* event = (ip_event_got_ip_t*) event_data; - ESP_LOGI(TAG, "got ip:" IPSTR, IP2STR(&event->ip_info.ip)); - s_retry_num = 0; - xEventGroupSetBits(s_wifi_event_group, WIFI_CONNECTED_BIT); - } -} - - -void initialise_wifi() -{ - s_wifi_event_group = xEventGroupCreate(); - ESP_ERROR_CHECK(esp_netif_init()); - ESP_ERROR_CHECK(esp_event_loop_create_default()); - esp_netif_create_default_wifi_sta(); - - wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT(); - ESP_ERROR_CHECK(esp_wifi_init(&cfg)); - - esp_event_handler_instance_t instance_any_id; - esp_event_handler_instance_t instance_got_ip; - ESP_ERROR_CHECK(esp_event_handler_instance_register(WIFI_EVENT, - ESP_EVENT_ANY_ID, - &event_handler_neu, - NULL, - &instance_any_id)); - ESP_ERROR_CHECK(esp_event_handler_instance_register(IP_EVENT, - IP_EVENT_STA_GOT_IP, - &event_handler_neu, - NULL, - &instance_got_ip)); - - - wifi_config_t wifi_config = { }; - strcpy((char*)wifi_config.sta.ssid, (const char*)ssid.c_str()); - strcpy((char*)wifi_config.sta.password, (const char*)passphrase.c_str()); - - ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) ); - ESP_ERROR_CHECK(esp_wifi_set_config(ESP_IF_WIFI_STA, &wifi_config) ); - ESP_ERROR_CHECK(esp_wifi_start() ); - - ESP_LOGI(TAG, "wifi_init_sta finished."); - - // Waiting until either the connection is established (WIFI_CONNECTED_BIT) or connection failed for the maximum - // number of re-tries (WIFI_FAIL_BIT). The bits are set by event_handler() (see above) - EventBits_t bits = xEventGroupWaitBits(s_wifi_event_group, - WIFI_CONNECTED_BIT | WIFI_FAIL_BIT, - pdFALSE, - pdFALSE, - portMAX_DELAY); - - // xEventGroupWaitBits() returns the bits before the call returned, hence we can test which event actually - // happened. 
- if (bits & WIFI_CONNECTED_BIT) { - ESP_LOGI(TAG, "connected to ap SSID:%s password:%s", - ssid.c_str(), passphrase.c_str()); - } else if (bits & WIFI_FAIL_BIT) { - ESP_LOGI(TAG, "Failed to connect to SSID:%s, password:%s", - ssid.c_str(), passphrase.c_str()); - } else { - ESP_LOGE(TAG, "UNEXPECTED EVENT"); - } - - // The event will not be processed after unregister - ESP_ERROR_CHECK(esp_event_handler_instance_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, instance_got_ip)); - ESP_ERROR_CHECK(esp_event_handler_instance_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, instance_any_id)); - vEventGroupDelete(s_wifi_event_group); - - tcpip_adapter_ip_info_t ip_info; - ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info)); - ipaddress = std::string(ip4addr_ntoa(&ip_info.ip)); - netmask = std::string(ip4addr_ntoa(&ip_info.netmask)); - gw = std::string(ip4addr_ntoa(&ip_info.gw)); - printf("IPv4 : %s\n", ip4addr_ntoa(&ip_info.ip)); - printf("HostName : %s\n", hostname.c_str()); -} - - -void initialise_wifi_fixed_ip2() -{ - s_wifi_event_group = xEventGroupCreate(); - ESP_ERROR_CHECK(esp_netif_init()); - ESP_ERROR_CHECK(esp_event_loop_create_default()); - esp_netif_t *my_sta = esp_netif_create_default_wifi_sta(); - - esp_netif_dhcpc_stop(my_sta); - - esp_netif_ip_info_t ip_info; - - int a, b, c, d; - - strinttoip4(ipaddress, a, b, c, d); - IP4_ADDR(&ip_info.ip, a, b, c, d); - - strinttoip4(gw, a, b, c, d); - IP4_ADDR(&ip_info.gw, a, b, c, d); - - strinttoip4(netmask, a, b, c, d); - IP4_ADDR(&ip_info.netmask, a, b, c, d); - - esp_netif_set_ip_info(my_sta, &ip_info); - - - wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT(); - ESP_ERROR_CHECK(esp_wifi_init(&cfg)); - - if (dns.length() > 0) { - esp_netif_dns_info_t dns_info; - ip4_addr_t ip; - ip.addr = esp_ip4addr_aton(dns.c_str()); - ip_addr_set_ip4_u32(&dns_info.ip, ip.addr); - ESP_ERROR_CHECK(esp_netif_set_dns_info(my_sta, ESP_NETIF_DNS_MAIN, &dns_info)); - } - - - esp_event_handler_instance_t instance_any_id; - esp_event_handler_instance_t instance_got_ip; - ESP_ERROR_CHECK(esp_event_handler_instance_register(WIFI_EVENT, - ESP_EVENT_ANY_ID, - &event_handler_neu, - NULL, - &instance_any_id)); - ESP_ERROR_CHECK(esp_event_handler_instance_register(IP_EVENT, - IP_EVENT_STA_GOT_IP, - &event_handler_neu, - NULL, - &instance_got_ip)); - - - wifi_config_t wifi_config = { }; - strcpy((char*)wifi_config.sta.ssid, (const char*)ssid.c_str()); - strcpy((char*)wifi_config.sta.password, (const char*)passphrase.c_str()); - - ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) ); - ESP_ERROR_CHECK(esp_wifi_set_config(ESP_IF_WIFI_STA, &wifi_config) ); - ESP_ERROR_CHECK(esp_wifi_start() ); - - ESP_LOGI(TAG, "wifi_init_sta finished."); - - // Waiting until either the connection is established (WIFI_CONNECTED_BIT) or connection failed for the maximum - // number of re-tries (WIFI_FAIL_BIT). The bits are set by event_handler() (see above) - EventBits_t bits = xEventGroupWaitBits(s_wifi_event_group, - WIFI_CONNECTED_BIT | WIFI_FAIL_BIT, - pdFALSE, - pdFALSE, - portMAX_DELAY); - - // xEventGroupWaitBits() returns the bits before the call returned, hence we can test which event actually - // happened. 
- if (bits & WIFI_CONNECTED_BIT) { - ESP_LOGI(TAG, "connected to ap SSID:%s password:%s", - ssid.c_str(), passphrase.c_str()); - } else if (bits & WIFI_FAIL_BIT) { - ESP_LOGI(TAG, "Failed to connect to SSID:%s, password:%s", - ssid.c_str(), passphrase.c_str()); - } else { - ESP_LOGE(TAG, "UNEXPECTED EVENT"); - } - - // The event will not be processed after unregister - ESP_ERROR_CHECK(esp_event_handler_instance_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, instance_got_ip)); - ESP_ERROR_CHECK(esp_event_handler_instance_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, instance_any_id)); - vEventGroupDelete(s_wifi_event_group); - - tcpip_adapter_ip_info_t ip_info2; - ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info2)); - ipaddress = std::string(ip4addr_ntoa(&ip_info2.ip)); - netmask = std::string(ip4addr_ntoa(&ip_info2.netmask)); - gw = std::string(ip4addr_ntoa(&ip_info2.gw)); -} - -void ConnectToWLAN() -{ - if (ipaddress.length() == 0 || gw.length() == 0 || netmask.length() == 0) - { - printf("Connect to WLAN with dyn. IP\n"); - initialise_wifi(); - } - else - { - printf("Connect to WLAN with fixed IP\n"); - initialise_wifi_fixed_ip2(); - } -} - - -bool ChangeHostName(std::string fn, std::string _newhostname) -{ - if (_newhostname == hostname) - return false; - - string line = ""; - std::vector zerlegt; - - bool found = false; - - std::vector neuesfile; - - FILE* pFile; - fn = FormatFileName(fn); - pFile = OpenFileAndWait(fn.c_str(), "r"); - - printf("file loaded\n"); - - if (pFile == NULL) - return false; - - char zw[1024]; - fgets(zw, 1024, pFile); - line = std::string(zw); - - while ((line.size() > 0) || !(feof(pFile))) - { - printf("%s", line.c_str()); - zerlegt = ZerlegeZeile(line, "="); - zerlegt[0] = trim(zerlegt[0], " "); - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){ - line = "hostname = \"" + _newhostname + "\"\n"; - found = true; - } - - neuesfile.push_back(line); - - if (fgets(zw, 1024, pFile) == NULL) - { - line = ""; - } - else - { - line = std::string(zw); - } - } - - if (!found) - { - line = "hostname = \"" + _newhostname + "\"\n"; - neuesfile.push_back(line); - } - - fclose(pFile); - - pFile = OpenFileAndWait(fn.c_str(), "w+"); - - for (int i = 0; i < neuesfile.size(); ++i) - { - fputs(neuesfile[i].c_str(), pFile); - } - - fclose(pFile); - - return true; -} - - -void LoadWlanFromFile(std::string fn) -{ - string line = ""; - std::vector zerlegt; - hostname = std_hostname; - - FILE* pFile; - fn = FormatFileName(fn); - - pFile = OpenFileAndWait(fn.c_str(), "r"); - printf("file loaded\n"); - - if (pFile == NULL) - return; - - char zw[1024]; - fgets(zw, 1024, pFile); - line = std::string(zw); - - while ((line.size() > 0) || !(feof(pFile))) - { - printf("%s", line.c_str()); - zerlegt = ZerlegeZeile(line, "="); - zerlegt[0] = trim(zerlegt[0], " "); - for (int i = 2; i < zerlegt.size(); ++i) - zerlegt[i] = zerlegt[i-1] + zerlegt[i]; - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){ - hostname = trim(zerlegt[1]); - if ((hostname[0] == '"') && (hostname[hostname.length()-1] == '"')){ - hostname = hostname.substr(1, hostname.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){ - ssid = trim(zerlegt[1]); - if ((ssid[0] == '"') && (ssid[ssid.length()-1] == '"')){ - ssid = ssid.substr(1, ssid.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "PASSWORD")){ - passphrase = zerlegt[1]; - if ((passphrase[0] == '"') && (passphrase[passphrase.length()-1] == '"')){ - passphrase = 
passphrase.substr(1, passphrase.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){ - ipaddress = zerlegt[1]; - if ((ipaddress[0] == '"') && (ipaddress[ipaddress.length()-1] == '"')){ - ipaddress = ipaddress.substr(1, ipaddress.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){ - gw = zerlegt[1]; - if ((gw[0] == '"') && (gw[gw.length()-1] == '"')){ - gw = gw.substr(1, gw.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){ - netmask = zerlegt[1]; - if ((netmask[0] == '"') && (netmask[netmask.length()-1] == '"')){ - netmask = netmask.substr(1, netmask.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){ - dns = zerlegt[1]; - if ((dns[0] == '"') && (dns[dns.length()-1] == '"')){ - dns = dns.substr(1, dns.length()-2); - } - } - - - if (fgets(zw, 1024, pFile) == NULL) - { - line = ""; - } - else - { - line = std::string(zw); - } - } - - fclose(pFile); - - // Check if Hostname was empty in .ini if yes set to std_hostname - if(hostname.length() <= 0){ - hostname = std_hostname; - } - - printf("\nWLan: %s, %s\n", ssid.c_str(), passphrase.c_str()); - printf("Hostename: %s\n", hostname.c_str()); - printf("Fixed IP: %s, Gateway %s, Netmask %s, DNS %s\n", ipaddress.c_str(), gw.c_str(), netmask.c_str(), dns.c_str()); - -} - -void LoadNetConfigFromFile(std::string _fn, std::string &_ip, std::string &_gw, std::string &_netmask, std::string &_dns) -{ - string line = ""; - std::vector zerlegt; - - - - FILE* pFile; - _fn = FormatFileName(_fn); - pFile = OpenFileAndWait(_fn.c_str(), "r"); - - if (pFile == NULL) - return; - - char zw[1024]; - fgets(zw, 1024, pFile); - line = std::string(zw); - - while ((line.size() > 0) || !(feof(pFile))) - { - printf("%s", line.c_str()); - zerlegt = ZerlegeZeile(line, "="); - zerlegt[0] = trim(zerlegt[0], " "); - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){ - _ip = zerlegt[1]; - if ((_ip[0] == '"') && (_ip[_ip.length()-1] == '"')){ - _ip = _ip.substr(1, _ip.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){ - _gw = zerlegt[1]; - if ((_gw[0] == '"') && (_gw[_gw.length()-1] == '"')){ - _gw = _gw.substr(1, _gw.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){ - _netmask = zerlegt[1]; - if ((_netmask[0] == '"') && (_netmask[_netmask.length()-1] == '"')){ - _netmask = _netmask.substr(1, _netmask.length()-2); - } - } - - if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){ - _dns = zerlegt[1]; - if ((_dns[0] == '"') && (_dns[_dns.length()-1] == '"')){ - _dns = _dns.substr(1, _dns.length()-2); - } - } - - if (fgets(zw, 1024, pFile) == NULL) - { - line = ""; - } - else - { - line = std::string(zw); - } - } - - fclose(pFile); -} - - -std::string getHostname(){ - return hostname; -} - -std::string getIPAddress(){ - return ipaddress; -} - -std::string getSSID(){ - return ssid; -} - -std::string getNetMask(){ - return netmask; -} - -std::string getGW(){ - return gw; -} - diff --git a/code/components/connect_wlan/connect_wlan._h_ b/code/components/connect_wlan/connect_wlan._h_ deleted file mode 100644 index f71962c3..00000000 --- a/code/components/connect_wlan/connect_wlan._h_ +++ /dev/null @@ -1,21 +0,0 @@ -//#ifndef CONNECT_WLAN_H -//#define CONNECT_WLAN_H - -#include -#include "driver/gpio.h" - -const int CONNECTED_BIT = BIT0; - -void ConnectToWLAN(); - -void LoadWlanFromFile(std::string fn); - -bool ChangeHostName(std::string fn, std::string 
_newhostname); - -std::string getHostname(); -std::string getIPAddress(); -std::string getSSID(); -std::string getNetMask(); -std::string getGW(); - -//#endif \ No newline at end of file diff --git a/code/components/connect_wlan/connect_wlan.cpp b/code/components/connect_wlan/connect_wlan.cpp index e01b6bcf..5dc94ef2 100644 --- a/code/components/connect_wlan/connect_wlan.cpp +++ b/code/components/connect_wlan/connect_wlan.cpp @@ -282,7 +282,7 @@ bool ChangeHostName(std::string fn, std::string _newhostname) if (!found) { - line = "hostname = \"" + _newhostname + "\"\n"; + line = "\nhostname = \"" + _newhostname + "\"\n"; neuesfile.push_back(line); } @@ -329,7 +329,7 @@ void LoadWlanFromFile(std::string fn) zerlegt = ZerlegeZeile(line, "="); zerlegt[0] = trim(zerlegt[0], " "); for (int i = 2; i < zerlegt.size(); ++i) - zerlegt[i] = zerlegt[i-1] + zerlegt[i]; + zerlegt[1] = zerlegt[1] + zerlegt[i]; if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){ hostname = trim(zerlegt[1]); diff --git a/code/components/esp32-camera-master/conversions/include/img_converters.h b/code/components/esp32-camera-master/conversions/include/img_converters.h index 2b83c4d6..330f8db8 100644 --- a/code/components/esp32-camera-master/conversions/include/img_converters.h +++ b/code/components/esp32-camera-master/conversions/include/img_converters.h @@ -62,7 +62,8 @@ bool frame2jpg_cb(camera_fb_t * fb, uint8_t quality, jpg_out_cb cb, void * arg); * @param height Height in pixels of the source image * @param format Format of the source image * @param quality JPEG quality of the resulting image - * @param out Pointer to be populated with the address of the resulting buffer + * @param out Pointer to be populated with the address of the resulting buffer. + * You MUST free the pointer once you are done with it. * @param out_len Pointer to be populated with the length of the output buffer * * @return true on success diff --git a/code/components/esp32-camera-master/conversions/to_bmp.c b/code/components/esp32-camera-master/conversions/to_bmp.c index 85f9c887..3b5e2b70 100644 --- a/code/components/esp32-camera-master/conversions/to_bmp.c +++ b/code/components/esp32-camera-master/conversions/to_bmp.c @@ -317,7 +317,7 @@ bool fmt2bmp(uint8_t *src, size_t src_len, uint16_t width, uint16_t height, pixf } *out = out_buf; *out_len = out_size; - return true; + return true; } bool frame2bmp(camera_fb_t * fb, uint8_t ** out, size_t * out_len) diff --git a/code/components/esp32-camera-master/driver/camera.c b/code/components/esp32-camera-master/driver/camera.c index b89dd37a..9e6a7164 100644 --- a/code/components/esp32-camera-master/driver/camera.c +++ b/code/components/esp32-camera-master/driver/camera.c @@ -1321,7 +1321,7 @@ esp_err_t camera_init(const camera_config_t* config) } vsync_intr_disable(); - err = gpio_install_isr_service(ESP_INTR_FLAG_LEVEL1 | ESP_INTR_FLAG_IRAM); + err = gpio_install_isr_service(ESP_INTR_FLAG_LOWMED | ESP_INTR_FLAG_IRAM); if (err != ESP_OK) { if (err != ESP_ERR_INVALID_STATE) { ESP_LOGE(TAG, "gpio_install_isr_service failed (%x)", err); diff --git a/code/components/esp32-camera-master/idf_component.yml b/code/components/esp32-camera-master/idf_component.yml index 9f9e0fcb..d8dc8e15 100644 --- a/code/components/esp32-camera-master/idf_component.yml +++ b/code/components/esp32-camera-master/idf_component.yml @@ -1,5 +1,3 @@ -name: "esp32-camera" - version: "1.0.0" - description: This package hosts ESP32 compatible driver for OV2640 image sensors. 
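Editor's note on the LoadWlanFromFile hunk above: the README's "WLAN passwords containing '='" bugfix works by folding every token after the key back into zerlegt[1], the element the subsequent key/value checks actually read, instead of accumulating them into later elements that were never used. The underlying requirement is simply that only the first '=' on a line acts as the key/value separator. A minimal, self-contained sketch of that idea follows; it is illustrative only, not the project's parser, and the helper names (splitKeyValue, strip) are hypothetical:

    #include <iostream>
    #include <string>

    // Split an ini-style line at the FIRST '=' only, so a value such as
    // password = "abc=123" keeps every '=' that belongs to the value.
    static bool splitKeyValue(const std::string &line, std::string &key, std::string &value) {
        size_t pos = line.find('=');
        if (pos == std::string::npos)
            return false;
        key = line.substr(0, pos);
        value = line.substr(pos + 1);
        // simplified stand-in for the project's trim()
        auto strip = [](std::string s) {
            size_t b = s.find_first_not_of(" \t");
            size_t e = s.find_last_not_of(" \t");
            return (b == std::string::npos) ? std::string() : s.substr(b, e - b + 1);
        };
        key = strip(key);
        value = strip(value);
        return true;
    }

    int main() {
        std::string k, v;
        if (splitKeyValue("password = \"abc=123\"", k, v))
            std::cout << k << " -> " << v << std::endl;   // password -> "abc=123"
        return 0;
    }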
Additionally it provides a few tools, which allow converting the captured frame data to the more common BMP and JPEG formats. +url: https://github.com/espressif/esp32-camera diff --git a/code/components/jomjol_controlcamera/CMakeLists.txt b/code/components/jomjol_controlcamera/CMakeLists.txt index 41380d13..ad4b0f20 100644 --- a/code/components/jomjol_controlcamera/CMakeLists.txt +++ b/code/components/jomjol_controlcamera/CMakeLists.txt @@ -4,6 +4,6 @@ list(APPEND EXTRA_COMPONENT_DIRS $ENV{IDF_PATH}/examples/common_components/proto idf_component_register(SRCS ${app_sources} INCLUDE_DIRS "." - REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc nvs_flash) + REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc jomjol_fileserver_ota nvs_flash) diff --git a/code/components/jomjol_controlcamera/ClassControllCamera.cpp b/code/components/jomjol_controlcamera/ClassControllCamera.cpp index 7dfd7e52..6d537371 100644 --- a/code/components/jomjol_controlcamera/ClassControllCamera.cpp +++ b/code/components/jomjol_controlcamera/ClassControllCamera.cpp @@ -8,6 +8,7 @@ #include "Helper.h" #include "CImageBasis.h" +#include "server_ota.h" #define BOARD_ESP32CAM_AITHINKER @@ -71,7 +72,7 @@ static camera_config_t camera_config = { //XCLK 20MHz or 10MHz for OV2640 double FPS (Experimental) // .xclk_freq_hz = 20000000, // Orginalwert - .xclk_freq_hz = 5000000, // Test, um die Bildfehler los zu werden !!!! + .xclk_freq_hz = 5000000, // Test, um die Bildfehler los zu werden !!!! ging mal mit 5000000 .ledc_timer = LEDC_TIMER_0, .ledc_channel = LEDC_CHANNEL_0, @@ -82,7 +83,7 @@ static camera_config_t camera_config = { .jpeg_quality = 5, //0-63 lower number means higher quality - .fb_count = 1 //if more than one, i2s runs in continuous mode. Use only with JPEG + .fb_count = 2 //if more than one, i2s runs in continuous mode. 
Use only with JPEG }; @@ -224,17 +225,10 @@ void CCamera::EnableAutoExposure(int flashdauer) const TickType_t xDelay = flashdauer / portTICK_PERIOD_MS; vTaskDelay( xDelay ); - camera_fb_t * fb = esp_camera_fb_get(); - if (!fb) { - ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed"); - } - esp_camera_fb_return(fb); - sensor_t * s = esp_camera_sensor_get(); s->set_gain_ctrl(s, 0); s->set_exposure_ctrl(s, 0); - LEDOnOff(false); LightOnOff(false); isFixedExposure = true; @@ -270,7 +264,10 @@ esp_err_t CCamera::CaptureToBasisImage(CImageBasis *_Image, int delay) camera_fb_t * fb = esp_camera_fb_get(); if (!fb) { ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed"); - LEDOnOff(false); + LightOnOff(false); + LogFile.WriteHeapInfo("Camera Capture Failed - Reinit Camera"); + Camera.InitCam(); + doReboot(); return ESP_FAIL; } @@ -355,6 +352,9 @@ esp_err_t CCamera::CaptureToFile(std::string nm, int delay) if (!fb) { ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed"); LEDOnOff(false); + LogFile.WriteHeapInfo("Camera Capture Failed - Reinit Camera"); + Camera.InitCam(); + doReboot(); return ESP_FAIL; } LEDOnOff(false); @@ -439,10 +439,13 @@ esp_err_t CCamera::CaptureToHTTP(httpd_req_t *req, int delay) vTaskDelay( xDelay ); } - fb = esp_camera_fb_get(); if (!fb) { ESP_LOGE(TAGCAMERACLASS, "Camera capture failed"); + LightOnOff(false); + LogFile.WriteHeapInfo("Camera Capture Failed - Reinit Camera"); + Camera.InitCam(); + doReboot(); httpd_resp_send_500(req); return ESP_FAIL; } @@ -578,10 +581,16 @@ CCamera::CCamera() contrast = -5; saturation = -5; isFixedExposure = false; + ActualQuality = camera_config.jpeg_quality; + ActualResolution = camera_config.frame_size; + } esp_err_t CCamera::InitCam() { + esp_camera_deinit(); + PowerResetCamera(); + if(CAM_PIN_PWDN != -1){ // Init the GPIO gpio_pad_select_gpio(CAM_PIN_PWDN); @@ -591,8 +600,6 @@ esp_err_t CCamera::InitCam() } printf("Init Camera\n"); - ActualQuality = camera_config.jpeg_quality; - ActualResolution = camera_config.frame_size; //initialize the camera esp_err_t err = esp_camera_init(&camera_config); if (err != ESP_OK) { @@ -600,5 +607,42 @@ esp_err_t CCamera::InitCam() return err; } + SetBrightnessContrastSaturation(brightness, contrast, saturation); + SetQualitySize(ActualQuality, ActualResolution); + + if (isFixedExposure) + EnableAutoExposure(waitbeforepicture_org); + + LightOnOff(false); + return ESP_OK; -} \ No newline at end of file +} + + + +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////////////// + + +void PowerResetCamera(){ + printf("Resetting camera by power down line.\n"); +/* + gpio_config_t conf = { 0 }; + conf.pin_bit_mask = 1LL << GPIO_NUM_32; + conf.mode = GPIO_MODE_OUTPUT; + gpio_config(&conf); +*/ + + gpio_pad_select_gpio(GPIO_NUM_32); + /* Set the GPIO as a push/pull output */ + gpio_set_direction(GPIO_NUM_32, GPIO_MODE_OUTPUT); + + + // carefull, logic is inverted compared to reset pin + gpio_set_level(GPIO_NUM_32, 0); // ehemals 1 !!!!!!!!!!!!!!!!!!!! + vTaskDelay(1000 / portTICK_PERIOD_MS); + gpio_set_level(GPIO_NUM_32, 1); // ehemals 0 !!!!!!!!!!!!!!!!!!!! 
+ vTaskDelay(1000 / portTICK_PERIOD_MS); +} + diff --git a/code/components/jomjol_controlcamera/ClassControllCamera.h b/code/components/jomjol_controlcamera/ClassControllCamera.h index b4389b8a..ef701577 100644 --- a/code/components/jomjol_controlcamera/ClassControllCamera.h +++ b/code/components/jomjol_controlcamera/ClassControllCamera.h @@ -25,6 +25,7 @@ class CCamera { framesize_t ActualResolution; int brightness, contrast, saturation; bool isFixedExposure; + int waitbeforepicture_org; public: @@ -49,6 +50,7 @@ class CCamera { esp_err_t CaptureToBasisImage(CImageBasis *_Image, int delay = 0); }; +void PowerResetCamera(); extern CCamera Camera; diff --git a/code/components/jomjol_controlcamera/server_camera.cpp b/code/components/jomjol_controlcamera/server_camera.cpp deleted file mode 100644 index d318a736..00000000 --- a/code/components/jomjol_controlcamera/server_camera.cpp +++ /dev/null @@ -1,246 +0,0 @@ -#include "server_camera.h" - -#include -#include "string.h" - -#include "esp_camera.h" -#include "ClassControllCamera.h" - -#include "ClassLogFile.h" - -#define SCRATCH_BUFSIZE2 8192 -char scratch2[SCRATCH_BUFSIZE2]; - -//#define DEBUG_DETAIL_ON - - - -void PowerResetCamera(){ - ESP_LOGD(TAGPARTCAMERA, "Resetting camera by power down line"); - gpio_config_t conf = { 0 }; - conf.pin_bit_mask = 1LL << GPIO_NUM_32; - conf.mode = GPIO_MODE_OUTPUT; - gpio_config(&conf); - - // carefull, logic is inverted compared to reset pin - gpio_set_level(GPIO_NUM_32, 1); - vTaskDelay(1000 / portTICK_PERIOD_MS); - gpio_set_level(GPIO_NUM_32, 0); - vTaskDelay(1000 / portTICK_PERIOD_MS); -} - - -esp_err_t handler_lightOn(httpd_req_t *req) -{ -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_lightOn - Start"); - printf("handler_lightOn uri:\n"); printf(req->uri); printf("\n"); -#endif - - Camera.LightOnOff(true); - const char* resp_str = (const char*) req->user_ctx; - httpd_resp_send(req, resp_str, strlen(resp_str)); - -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_lightOn - Done"); -#endif - - return ESP_OK; -}; - -esp_err_t handler_lightOff(httpd_req_t *req) -{ -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_lightOff - Start"); - printf("handler_lightOff uri:\n"); printf(req->uri); printf("\n"); -#endif - Camera.LightOnOff(false); - const char* resp_str = (const char*) req->user_ctx; - httpd_resp_send(req, resp_str, strlen(resp_str)); - -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_lightOff - Done"); -#endif - - return ESP_OK; -}; - -esp_err_t handler_capture(httpd_req_t *req) -{ -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_capture - Start"); -#endif - - int quality; - framesize_t res; - - Camera.GetCameraParameter(req, quality, res); - -#ifdef DEBUG_DETAIL_ON - printf("Size: %d", res); printf(" Quality: %d\n", quality); -#endif - - Camera.SetQualitySize(quality, res); - - esp_err_t ressult; - ressult = Camera.CaptureToHTTP(req); - -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_capture - Done"); -#endif - - return ressult; -}; - - -esp_err_t handler_capture_with_ligth(httpd_req_t *req) -{ -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_capture_with_ligth - Start"); -#endif - char _query[100]; - char _delay[10]; - - int quality; - framesize_t res; - int delay = 2500; - - if (httpd_req_get_url_query_str(req, _query, 100) == ESP_OK) - { - printf("Query: "); printf(_query); printf("\n"); - if (httpd_query_key_value(_query, "delay", _delay, 10) == ESP_OK) - { -#ifdef DEBUG_DETAIL_ON - printf("Delay: "); printf(_delay); printf("\n"); -#endif 
- delay = atoi(_delay); - - if (delay < 0) - delay = 0; - } - }; - - Camera.GetCameraParameter(req, quality, res); - -#ifdef DEBUG_DETAIL_ON - printf("Size: %d", res); printf(" Quality: %d\n", quality); -#endif - - Camera.SetQualitySize(quality, res); - Camera.LightOnOff(true); - const TickType_t xDelay = delay / portTICK_PERIOD_MS; - vTaskDelay( xDelay ); - - esp_err_t ressult; - ressult = Camera.CaptureToHTTP(req); - - Camera.LightOnOff(false); - -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_capture_with_ligth - Done"); -#endif - - return ressult; -}; - - - -esp_err_t handler_capture_save_to_file(httpd_req_t *req) -{ -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_capture_save_to_file - Start"); -#endif - - char _query[100]; - char _delay[10]; - int delay = 0; - char filename[100]; - std::string fn = "/sdcard/"; - - - int quality; - framesize_t res; - - if (httpd_req_get_url_query_str(req, _query, 100) == ESP_OK) - { - printf("Query: "); printf(_query); printf("\n"); - if (httpd_query_key_value(_query, "filename", filename, 100) == ESP_OK) - { - fn.append(filename); -#ifdef DEBUG_DETAIL_ON - printf("Filename: "); printf(fn.c_str()); printf("\n"); -#endif - } - else - fn.append("noname.jpg"); - - if (httpd_query_key_value(_query, "delay", _delay, 10) == ESP_OK) - { -#ifdef DEBUG_DETAIL_ON - printf("Delay: "); printf(_delay); printf("\n"); -#endif - delay = atoi(_delay); - - if (delay < 0) - delay = 0; - } - - } - else - fn.append("noname.jpg"); - - Camera.GetCameraParameter(req, quality, res); -#ifdef DEBUG_DETAIL_ON - printf("Size: %d", res); printf(" Quality: %d\n", quality); -#endif - Camera.SetQualitySize(quality, res); - - esp_err_t ressult; - ressult = Camera.CaptureToFile(fn, delay); - - const char* resp_str = (const char*) fn.c_str(); - httpd_resp_send(req, resp_str, strlen(resp_str)); - -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_capture_save_to_file - Done"); -#endif - - return ressult; -}; - - - -void register_server_camera_uri(httpd_handle_t server) -{ -#ifdef DEBUG_DETAIL_ON - ESP_LOGI(TAGPARTCAMERA, "server_part_camera - Registering URI handlers"); -#endif - - httpd_uri_t camuri = { }; - camuri.method = HTTP_GET; - - camuri.uri = "/lighton"; - camuri.handler = handler_lightOn; - camuri.user_ctx = (void*) "Light On"; - httpd_register_uri_handler(server, &camuri); - - camuri.uri = "/lightoff"; - camuri.handler = handler_lightOff; - camuri.user_ctx = (void*) "Light Off"; - httpd_register_uri_handler(server, &camuri); - - camuri.uri = "/capture"; - camuri.handler = handler_capture; - camuri.user_ctx = NULL; - httpd_register_uri_handler(server, &camuri); - - camuri.uri = "/capture_with_flashlight"; - camuri.handler = handler_capture_with_ligth; - camuri.user_ctx = NULL; - httpd_register_uri_handler(server, &camuri); - - camuri.uri = "/save"; - camuri.handler = handler_capture_save_to_file; - camuri.user_ctx = NULL; - httpd_register_uri_handler(server, &camuri); -} diff --git a/code/components/jomjol_controlcamera/server_camera.h b/code/components/jomjol_controlcamera/server_camera.h deleted file mode 100644 index b3f1fe19..00000000 --- a/code/components/jomjol_controlcamera/server_camera.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef JOMJOL_CONTROLCAMERA_H -#define JOMJOL_CONTROLCAMERA_H - -#include - -#include - -//#include "ClassControllCamera.h" - -static const char *TAGPARTCAMERA = "server_camera"; - -void register_server_camera_uri(httpd_handle_t server); - -void PowerResetCamera(); - -#endif \ No newline at end of file diff --git 
a/code/components/jomjol_fileserver_ota/server_file.cpp b/code/components/jomjol_fileserver_ota/server_file.cpp index 26bf32b0..05588aad 100644 --- a/code/components/jomjol_fileserver_ota/server_file.cpp +++ b/code/components/jomjol_fileserver_ota/server_file.cpp @@ -721,33 +721,21 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path) /* Validate file storage base path */ if (!base_path) { -// if (!base_path || strcmp(base_path, "/spiffs") != 0) { ESP_LOGE(TAG, "File server base_path not set"); -// return ESP_ERR_INVALID_ARG; } if (server_data) { ESP_LOGE(TAG, "File server already started"); -// return ESP_ERR_INVALID_STATE; } /* Allocate memory for server data */ server_data = (file_server_data *) calloc(1, sizeof(struct file_server_data)); if (!server_data) { ESP_LOGE(TAG, "Failed to allocate memory for server data"); -// return ESP_ERR_NO_MEM; } strlcpy(server_data->base_path, base_path, sizeof(server_data->base_path)); - - - /* URI handler for getting uploaded files */ -// char zw[sizeof(serverprefix)+1]; -// strcpy(zw, serverprefix); -// zw[strlen(serverprefix)] = '*'; -// zw[strlen(serverprefix)+1] = '\0'; -// printf("zw: %s\n", zw); httpd_uri_t file_download = { .uri = "/fileserver*", // Match all URIs of type /path/to/file .method = HTTP_GET, @@ -756,8 +744,6 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path) }; httpd_register_uri_handler(server, &file_download); - - httpd_uri_t file_logfileact = { .uri = "/logfileact", // Match all URIs of type /path/to/file .method = HTTP_GET, @@ -784,5 +770,4 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path) .user_ctx = server_data // Pass server data as context }; httpd_register_uri_handler(server, &file_delete); - } diff --git a/code/components/jomjol_fileserver_ota/server_ota.cpp b/code/components/jomjol_fileserver_ota/server_ota.cpp index 6c69ae1e..e2304d40 100644 --- a/code/components/jomjol_fileserver_ota/server_ota.cpp +++ b/code/components/jomjol_fileserver_ota/server_ota.cpp @@ -412,11 +412,26 @@ void task_reboot(void *pvParameter) vTaskDelete(NULL); //Delete this task if it exits from the loop above } + + +inline void invoke_abort(void) +{ + while (1) { + if (esp_cpu_in_ocd_debug_mode()) { + __asm__ ("break 0,0"); + } + *((int *) 0) = 0; + } +} + + void doReboot(){ LogFile.WriteToFile("Reboot - now"); - KillTFliteTasks(); xTaskCreate(&task_reboot, "reboot", configMINIMAL_STACK_SIZE * 64, NULL, 10, NULL); + KillTFliteTasks(); +// xTaskCreate(&task_reboot, "reboot", configMINIMAL_STACK_SIZE * 64, NULL, 10, NULL); vTaskDelay(5000 / portTICK_PERIOD_MS); + invoke_abort(); esp_restart(); hard_restart(); } diff --git a/code/components/jomjol_flowcontroll/ClassFlowAnalog.cpp b/code/components/jomjol_flowcontroll/ClassFlowAnalog.cpp index 007c87b4..04fdd886 100644 --- a/code/components/jomjol_flowcontroll/ClassFlowAnalog.cpp +++ b/code/components/jomjol_flowcontroll/ClassFlowAnalog.cpp @@ -3,6 +3,8 @@ #include #include #include +#include // std::stringstream + // #define OHNETFLITE @@ -283,7 +285,7 @@ bool ClassFlowAnalog::doNeuralNetwork(string time) zwcnn = FormatFileName(zwcnn); printf(zwcnn.c_str());printf("\n"); tflite->LoadModel(zwcnn); - tflite->MakeAllocate(); +// tflite->MakeAllocate(); #endif for (int i = 0; i < ROI.size(); ++i) diff --git a/code/components/jomjol_flowcontroll/ClassFlowDigit.cpp b/code/components/jomjol_flowcontroll/ClassFlowDigit.cpp index 9225ce6e..7e405e4e 100644 --- a/code/components/jomjol_flowcontroll/ClassFlowDigit.cpp +++ 
b/code/components/jomjol_flowcontroll/ClassFlowDigit.cpp @@ -224,7 +224,7 @@ bool ClassFlowDigit::doNeuralNetwork(string time) string zwcnn = FormatFileName("/sdcard" + cnnmodelfile); printf(zwcnn.c_str());printf("\n"); tflite->LoadModel(zwcnn); - tflite->MakeAllocate(); +// tflite->MakeAllocate(); #endif for (int i = 0; i < ROI.size(); ++i) diff --git a/code/components/jomjol_flowcontroll/camera_define.h b/code/components/jomjol_flowcontroll/camera_define.h deleted file mode 100644 index 8629c232..00000000 --- a/code/components/jomjol_flowcontroll/camera_define.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef CAMERADEFINED -#define CAMERADEFINED - - -#if defined(CAMERA_MODEL_WROVER_KIT) -#define PWDN_GPIO_NUM -1 -#define RESET_GPIO_NUM -1 -#define XCLK_GPIO_NUM 21 -#define SIOD_GPIO_NUM 26 -#define SIOC_GPIO_NUM 27 - -#define Y9_GPIO_NUM 35 -#define Y8_GPIO_NUM 34 -#define Y7_GPIO_NUM 39 -#define Y6_GPIO_NUM 36 -#define Y5_GPIO_NUM 19 -#define Y4_GPIO_NUM 18 -#define Y3_GPIO_NUM 5 -#define Y2_GPIO_NUM 4 -#define VSYNC_GPIO_NUM 25 -#define HREF_GPIO_NUM 23 -#define PCLK_GPIO_NUM 22 - -#elif defined(CAMERA_MODEL_M5STACK_PSRAM) -#define PWDN_GPIO_NUM -1 -#define RESET_GPIO_NUM 15 -#define XCLK_GPIO_NUM 27 -#define SIOD_GPIO_NUM 25 -#define SIOC_GPIO_NUM 23 - -#define Y9_GPIO_NUM 19 -#define Y8_GPIO_NUM 36 -#define Y7_GPIO_NUM 18 -#define Y6_GPIO_NUM 39 -#define Y5_GPIO_NUM 5 -#define Y4_GPIO_NUM 34 -#define Y3_GPIO_NUM 35 -#define Y2_GPIO_NUM 32 -#define VSYNC_GPIO_NUM 22 -#define HREF_GPIO_NUM 26 -#define PCLK_GPIO_NUM 21 - -#elif defined(CAMERA_MODEL_AI_THINKER) -#define PWDN_GPIO_NUM GPIO_NUM_32 -#define RESET_GPIO_NUM -1 -#define XCLK_GPIO_NUM GPIO_NUM_0 -#define SIOD_GPIO_NUM GPIO_NUM_26 -#define SIOC_GPIO_NUM GPIO_NUM_27 - -#define Y9_GPIO_NUM GPIO_NUM_35 -#define Y8_GPIO_NUM GPIO_NUM_34 -#define Y7_GPIO_NUM GPIO_NUM_39 -#define Y6_GPIO_NUM GPIO_NUM_36 -#define Y5_GPIO_NUM GPIO_NUM_21 -#define Y4_GPIO_NUM GPIO_NUM_19 -#define Y3_GPIO_NUM GPIO_NUM_18 -#define Y2_GPIO_NUM GPIO_NUM_5 -#define VSYNC_GPIO_NUM GPIO_NUM_25 -#define HREF_GPIO_NUM GPIO_NUM_23 -#define PCLK_GPIO_NUM GPIO_NUM_22 - -#else -#error "Camera model not selected" -#endif - - - -static camera_config_t camera_config = { - .pin_pwdn = PWDN_GPIO_NUM, - .pin_reset = RESET_GPIO_NUM, - .pin_xclk = XCLK_GPIO_NUM, - .pin_sscb_sda = SIOD_GPIO_NUM, - .pin_sscb_scl = SIOC_GPIO_NUM, - - .pin_d7 = Y9_GPIO_NUM, - .pin_d6 = Y8_GPIO_NUM, - .pin_d5 = Y7_GPIO_NUM, - .pin_d4 = Y6_GPIO_NUM, - .pin_d3 = Y5_GPIO_NUM, - .pin_d2 = Y4_GPIO_NUM, - .pin_d1 = Y3_GPIO_NUM, - .pin_d0 = Y2_GPIO_NUM, - .pin_vsync = VSYNC_GPIO_NUM, - .pin_href = HREF_GPIO_NUM, - .pin_pclk = PCLK_GPIO_NUM, - - //XCLK 20MHz or 10MHz for OV2640 double FPS (Experimental) - .xclk_freq_hz = 20000000, - .ledc_timer = LEDC_TIMER_0, - .ledc_channel = LEDC_CHANNEL_0, - - .pixel_format = PIXFORMAT_JPEG,//YUV422,GRAYSCALE,RGB565,JPEG -// .pixel_format = PIXFORMAT_RGB888,//YUV422,GRAYSCALE,RGB565,JPEG -// .frame_size = FRAMESIZE_QVGA,//QQVGA-QXGA Do not use sizes above QVGA when not JPEG - .frame_size = FRAMESIZE_SVGA,//QQVGA-QXGA Do not use sizes above QVGA when not JPEG - - .jpeg_quality = 12, //0-63 lower number means higher quality - .fb_count = 1 //if more than one, i2s runs in continuous mode. 
Use only with JPEG -}; - -#endif diff --git a/code/components/jomjol_tfliteclass.zip b/code/components/jomjol_tfliteclass.zip new file mode 100644 index 00000000..2bbe04f9 Binary files /dev/null and b/code/components/jomjol_tfliteclass.zip differ diff --git a/code/components/jomjol_tfliteclass/CTfLiteClass.cpp b/code/components/jomjol_tfliteclass/CTfLiteClass.cpp index d0a529f5..00a4f0fe 100644 --- a/code/components/jomjol_tfliteclass/CTfLiteClass.cpp +++ b/code/components/jomjol_tfliteclass/CTfLiteClass.cpp @@ -6,9 +6,13 @@ // #define DEBUG_DETAIL_ON +//#define GET_MEMORY(X) malloc(X) +#define GET_MEMORY(X) heap_caps_malloc(X, MALLOC_CAP_SPIRAM) + + float CTfLiteClass::GetOutputValue(int nr) { - TfLiteTensor* output2 = this->interpreter->output(0); + TfLiteTensor* output2 = interpreter->output(0); int numeroutput = output2->dims->data[1]; if ((nr+1) > numeroutput) @@ -53,7 +57,7 @@ int CTfLiteClass::GetOutClassification() void CTfLiteClass::GetInputDimension(bool silent = false) { - TfLiteTensor* input2 = this->interpreter->input(0); + TfLiteTensor* input2 = interpreter->input(0); int numdim = input2->dims->size; if (!silent) printf("NumDimension: %d\n", numdim); @@ -72,7 +76,7 @@ void CTfLiteClass::GetInputDimension(bool silent = false) void CTfLiteClass::GetOutPut() { - TfLiteTensor* output2 = this->interpreter->output(0); + TfLiteTensor* output2 = interpreter->output(0); int numdim = output2->dims->size; printf("NumDimension: %d\n", numdim); @@ -142,20 +146,20 @@ void CTfLiteClass::MakeAllocate() static tflite::AllOpsResolver resolver; // printf(LogFile.getESPHeapInfo().c_str()); printf("\n"); - this->interpreter = new tflite::MicroInterpreter(this->model, resolver, this->tensor_arena, this->kTensorArenaSize, this->error_reporter); + interpreter = new tflite::MicroInterpreter(model, resolver, tensor_arena, kTensorArenaSize, error_reporter); // printf(LogFile.getESPHeapInfo().c_str()); printf("\n"); - TfLiteStatus allocate_status = this->interpreter->AllocateTensors(); + TfLiteStatus allocate_status = interpreter->AllocateTensors(); if (allocate_status != kTfLiteOk) { TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed"); - this->GetInputDimension(); + GetInputDimension(); return; } // printf("Allocate Done.\n"); } void CTfLiteClass::GetInputTensorSize(){ - float *zw = this->input; + float *zw = input; int test = sizeof(zw); #ifdef DEBUG_DETAIL_ON printf("Input Tensor Dimension: %d\n", test); @@ -211,36 +215,39 @@ unsigned char* CTfLiteClass::ReadFileToCharArray(std::string _fn) void CTfLiteClass::LoadModel(std::string _fn){ #ifdef SUPRESS_TFLITE_ERRORS - this->error_reporter = new tflite::OwnMicroErrorReporter; + error_reporter = new tflite::OwnMicroErrorReporter; #else - this->error_reporter = new tflite::MicroErrorReporter; + error_reporter = new tflite::MicroErrorReporter; #endif unsigned char *rd; rd = ReadFileToCharArray(_fn.c_str()); - this->model = tflite::GetModel(rd); + model = tflite::GetModel(rd); free(rd); TFLITE_MINIMAL_CHECK(model != nullptr); + MakeAllocate(); } CTfLiteClass::CTfLiteClass() { - this->model = nullptr; - this->interpreter = nullptr; - this->input = nullptr; - this->output = nullptr; - this->kTensorArenaSize = 200 * 1024; /// laut testfile: 108000 - bisher 600 - this->tensor_arena = new uint8_t[kTensorArenaSize]; + model = nullptr; + interpreter = nullptr; + input = nullptr; + output = nullptr; + kTensorArenaSize = 200 * 1024; /// laut testfile: 108000 - bisher 600 + tensor_arena = (uint8_t*) GET_MEMORY(kTensorArenaSize); + +// tensor_arena = new 
uint8_t[kTensorArenaSize]; } CTfLiteClass::~CTfLiteClass() { - delete this->tensor_arena; - delete this->interpreter; - delete this->error_reporter; + delete tensor_arena; + delete interpreter; + delete error_reporter; } diff --git a/code/components/jomjol_tfliteclass/CTfLiteClass.h b/code/components/jomjol_tfliteclass/CTfLiteClass.h index c4bd057e..36e7e7e8 100644 --- a/code/components/jomjol_tfliteclass/CTfLiteClass.h +++ b/code/components/jomjol_tfliteclass/CTfLiteClass.h @@ -9,7 +9,7 @@ #include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_interpreter.h" #include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/version.h" +//#include "tensorflow/lite/version.h" #include "tensorflow/lite/micro/kernels/micro_ops.h" #include "esp_err.h" #include "esp_log.h" @@ -41,7 +41,7 @@ class CTfLiteClass const tflite::Model* model; tflite::MicroInterpreter* interpreter; TfLiteTensor* output = nullptr; - static tflite::AllOpsResolver resolver; + tflite::AllOpsResolver resolver; int kTensorArenaSize; uint8_t *tensor_arena; @@ -52,12 +52,12 @@ class CTfLiteClass long GetFileSize(std::string filename); unsigned char* ReadFileToCharArray(std::string _fn); - + + void MakeAllocate(); public: CTfLiteClass(); ~CTfLiteClass(); void LoadModel(std::string _fn); - void MakeAllocate(); void GetInputTensorSize(); bool LoadInputImageBasis(CImageBasis *rs); void Invoke(); diff --git a/code/components/jomjol_tfliteclass/server_tflite.cpp b/code/components/jomjol_tfliteclass/server_tflite.cpp index f29e384c..a1b44f07 100644 --- a/code/components/jomjol_tfliteclass/server_tflite.cpp +++ b/code/components/jomjol_tfliteclass/server_tflite.cpp @@ -128,31 +128,6 @@ void blink_task_doFlow(void *pvParameter) xHandleblink_task_doFlow = NULL; } - -esp_err_t handler_init(httpd_req_t *req) -{ -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_init - Start"); - printf("handler_doinit uri:\n"); printf(req->uri); printf("\n"); -#endif - - char* resp_str = "Init started
"; - httpd_resp_send(req, resp_str, strlen(resp_str)); - - doInit(); - - resp_str = "Init done
"; - httpd_resp_send(req, resp_str, strlen(resp_str)); - /* Respond with an empty chunk to signal HTTP response completion */ - httpd_resp_send_chunk(req, NULL, 0); - -#ifdef DEBUG_DETAIL_ON - LogFile.WriteHeapInfo("handler_init - Done"); -#endif - - return ESP_OK; -}; - esp_err_t handler_doflow(httpd_req_t *req) { #ifdef DEBUG_DETAIL_ON @@ -434,44 +409,6 @@ esp_err_t handler_editflow(httpd_req_t *req) httpd_resp_sendstr_chunk(req, zw.c_str()); } - - if (_task.compare("test_align") == 0) - { - std::string _host = ""; - if (httpd_query_key_value(_query, "host", _valuechar, 30) == ESP_OK) { - _host = std::string(_valuechar); - } -// printf("Parameter host: "); printf(_host.c_str()); printf("\n"); - -// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str()); - std::string zw = tfliteflow.doSingleStep("[Alignment]", _host); - httpd_resp_sendstr_chunk(req, zw.c_str()); - } - if (_task.compare("test_analog") == 0) - { - std::string _host = ""; - if (httpd_query_key_value(_query, "host", _valuechar, 30) == ESP_OK) { - _host = std::string(_valuechar); - } -// printf("Parameter host: "); printf(_host.c_str()); printf("\n"); -// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str()); - std::string zw = tfliteflow.doSingleStep("[Analog]", _host); - httpd_resp_sendstr_chunk(req, zw.c_str()); - } - if (_task.compare("test_digits") == 0) - { - std::string _host = ""; - if (httpd_query_key_value(_query, "host", _valuechar, 30) == ESP_OK) { - _host = std::string(_valuechar); - } -// printf("Parameter host: "); printf(_host.c_str()); printf("\n"); - -// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str()); - std::string zw = tfliteflow.doSingleStep("[Digits]", _host); - httpd_resp_sendstr_chunk(req, zw.c_str()); - } - - /* Respond with an empty chunk to signal HTTP response completion */ httpd_resp_sendstr_chunk(req, NULL); @@ -606,11 +543,6 @@ void register_server_tflite_uri(httpd_handle_t server) httpd_uri_t camuri = { }; camuri.method = HTTP_GET; - camuri.uri = "/doinit"; - camuri.handler = handler_init; - camuri.user_ctx = (void*) "Light On"; - httpd_register_uri_handler(server, &camuri); - camuri.uri = "/setPreValue.html"; camuri.handler = handler_prevalue; camuri.user_ctx = (void*) "Prevalue"; @@ -620,7 +552,6 @@ void register_server_tflite_uri(httpd_handle_t server) camuri.handler = handler_doflow; camuri.user_ctx = (void*) "Light Off"; httpd_register_uri_handler(server, &camuri); - camuri.uri = "/editflow.html"; camuri.handler = handler_editflow; diff --git a/code/components/jomjol_time_sntp/time_sntp.h b/code/components/jomjol_time_sntp/time_sntp.h index 930bfa35..434efe61 100644 --- a/code/components/jomjol_time_sntp/time_sntp.h +++ b/code/components/jomjol_time_sntp/time_sntp.h @@ -9,7 +9,6 @@ #include "esp_log.h" #include "esp_attr.h" #include "esp_sleep.h" -// #include "nvs_flash.h" #include "esp_sntp.h" void setup_time(void); diff --git a/code/components/tfmicro/CMakeLists.txt b/code/components/tfmicro/CMakeLists.txt index f56b874c..11d92936 100644 --- a/code/components/tfmicro/CMakeLists.txt +++ b/code/components/tfmicro/CMakeLists.txt @@ -23,7 +23,7 @@ if(NOT DEFINED ENV{IDF_PATH}) endif() idf_component_register( - SRCS tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc 
tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/micro/testing/test_conv_model.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/activations.cc + SRCS tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/system_setup.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/core/api/tensor_utils.cc 
tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/micro/kernels/activations.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/add_n.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/batch_to_space_nd.cc tensorflow/lite/micro/kernels/cast.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/conv_common.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/depthwise_conv_common.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/detection_postprocess.cc tensorflow/lite/micro/kernels/div.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/elu.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/exp.cc tensorflow/lite/micro/kernels/expand_dims.cc tensorflow/lite/micro/kernels/fill.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/fully_connected_common.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/l2_pool_2d.cc tensorflow/lite/micro/kernels/leaky_relu.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/quantize_common.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/softmax_common.cc tensorflow/lite/micro/kernels/space_to_batch_nd.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/squeeze.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/svdf_common.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/transpose_conv.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/zeros_like.cc INCLUDE_DIRS . 
third_party/gemmlowp third_party/flatbuffers/include third_party/ruy) # Reduce the level of paranoia to be able to compile TF sources @@ -32,7 +32,7 @@ target_compile_options(${COMPONENT_LIB} PRIVATE -Wno-missing-field-initializers -Wno-type-limits) -target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter) -target_compile_options(${COMPONENT_LIB} PRIVATE $<$: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter >) +target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP) +target_compile_options(${COMPONENT_LIB} PRIVATE $<$: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP >) target_compile_options(${COMPONENT_LIB} INTERFACE $<$>:-DTF_LITE_STATIC_MEMORY>) target_link_libraries(${COMPONENT_LIB} PRIVATE -lm) diff --git a/code/components/tfmicro/tensorflow/core/public/version.h b/code/components/tfmicro/tensorflow/core/public/version.h deleted file mode 100644 index 08318293..00000000 --- a/code/components/tfmicro/tensorflow/core/public/version.h +++ /dev/null @@ -1,139 +0,0 @@ -/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_CORE_PUBLIC_VERSION_H_ -#define TENSORFLOW_CORE_PUBLIC_VERSION_H_ - -// TensorFlow uses semantic versioning, see http://semver.org/. - -// Also update tensorflow/tensorflow.bzl and -// tensorflow/tools/pip_package/setup.py -#define TF_MAJOR_VERSION 2 -#define TF_MINOR_VERSION 5 -#define TF_PATCH_VERSION 0 - -// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", -// "-beta", "-rc", "-rc.1") -#define TF_VERSION_SUFFIX "" - -#define TF_STR_HELPER(x) #x -#define TF_STR(x) TF_STR_HELPER(x) - -// e.g. "0.5.0" or "0.6.0-alpha". 
-#define TF_VERSION_STRING \ - (TF_STR(TF_MAJOR_VERSION) "." TF_STR(TF_MINOR_VERSION) "." TF_STR( \ - TF_PATCH_VERSION) TF_VERSION_SUFFIX) - -// GraphDef compatibility versions (the versions field in graph.proto). -// -// Each graph has producer and min_consumer versions, and each -// consumer has its own version and a min_producer. In addition, graphs can -// mark specific consumer versions as bad (to prevent bugs from executing). -// A consumer will execute a graph if the consumer's version is at least the -// graph's min_consumer, the graph's producer version is at least the consumer's -// min_producer, and the consumer version isn't specifically disallowed by the -// graph. -// -// By default, newly created graphs have producer version TF_GRAPH_DEF_VERSION -// min_consumer TF_GRAPH_DEF_MIN_CONSUMER, and no other bad consumer versions. -// -// Version history: -// -// 0. Graphs created before GraphDef versioning -// 1. First real version (2dec2015) -// 2. adjust_contrast only takes float, doesn't perform clamping (11dec2015) -// 3. Remove TileGrad, since it was equivalent to reduce_sum (30dec2015) -// 4. When support for this version is removed, we can safely make AttrValue -// parsing more strict with respect to empty list values (see -// 111635679, 7jan2016). -// 5. Graphs are wholly-validated during Session::Create() (7jan2016). -// 6. TensorFlow is scalar strict within Google (27jan2016). -// 7. Remove TopK in favor of TopKV2 (5feb2016). -// 8. Replace RandomCrop from C++ with pure Python (5feb2016). -// 9. Deprecate batch_norm_with_global_normalization (16feb2016). -// 10. Deprecate conv3d_backprop_{filter,input} (10jun2016). -// 11. Deprecate {batch}_self_adjoint_eig (3aug2016). -// 12. Graph consumers understand the node_def field of FunctionDef (22aug2016). -// 13. Deprecate multiple batch linear algebra ops (9sep2016). -// 14. Deprecate batch_matrix_* ops. (10sep2016). -// 15. Deprecate batch_fft_* ops. (14sep2016). -// 16. Deprecate tensor_array (v1) ops in favor of v2 (10nov2016). -// 17. Deprecate inv (11nov2016). -// 17. Expose reverse_v2 (10nov2016) -// 18. Add VariableV2 (30nov2016) -// 19. Deprecated ops created by models moved out of core SkipGram, NegTrain. -// (08dec2016) -// 20. Catch all version 1.0 changes to Python API generation. SplitV is now -// used for tf.split, ReverseV2 is now used by tf.reverse, ConcatV2 is -// now used by tf.concat. Graphs use flooring -// division and mod semantics. TensorArrayV3. (12dec2016) -// Also considered the version for when it is required for reduction -// ops' indices to be scalar or vector, and not higher rank. -// Some earlier graph def versions allowed this. -// 21. Dropped FunctionDef.Node support, switched to node_def introduced -// in version 12. (11jan2017) -// 22. Placeholder now can specify and enforce scalar and partial -// shapes, particularly when restoring a graph from GraphDef -// produced at version 22 or later. (04/10/2016) -// 23. Remove NonMaxSuppression in favor of NonMaxSuppressionV2. -// 24. Deprecate lookup ops (v1) ops in favor of v2 (30may2017) -// 25. Deprecate stack (v1) ops in favor of v2 (2017/6/15). -// 25. Deprecate RandomPoisson (v1) ops in favor of v2 (2017/10/25). -// 26. Add a bool 'stripped_default_attrs' to MetaInfoDef indicating -// whether default-valued attrs have been stripped from the nodes in the -// GraphDef. (7dec2017) -// 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops -// deprecated in favor of V2 ops. (2018/01/23) -// 28. 
Deprecate MatrixExponential op in favor of Python implementation. -// (2018/08/21). -// (2019/02/15). Added `control_ret` field to FunctionDef proto, and -// `control_output` field to OpDef proto. -// 29. Deprecate StatefulStandardNormal op in favor of StatefulStandardNormalV2. -// (2019/03/25). -// (2019/04/17). Added `arg_attr` field to FunctionDefProto. -// 30. (2019/05/09) First date based GraphDef version. GraphDef -// versions advance by 1 each day after this point. - -#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 -#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 578 // Updated: 2020/11/7 - -// Checkpoint compatibility versions (the versions field in SavedSliceMeta). -// -// The checkpoint versions have the same semantics as GraphDef versions, but the -// numbering scheme is separate. We have no plans to ever deprecate checkpoint -// versions, but it's good to have this in place in case we ever need to. -// -// Version history: -// -// 0. Checkpoints saved before checkpoint versioning. -// 1. First real version (10feb2015). -#define TF_CHECKPOINT_VERSION_MIN_PRODUCER 0 -#define TF_CHECKPOINT_VERSION_MIN_CONSUMER 0 -#define TF_CHECKPOINT_VERSION 1 - -/// Version query functions (defined in generated version_info.cc) - -// Host compiler version (declared elsewhere to be __VERSION__) -extern const char* tf_compiler_version(); -// The git commit designator when tensorflow was built -// If no git repository, this will be "internal". -extern const char* tf_git_version(); -// Value of the _GLIBCXX_USE_CXX11_ABI flag, or 0 if it's not set. -extern int tf_cxx11_abi_flag(); -// Returns 1 if build is monolithic, or 0 otherwise. -extern int tf_monolithic_build(); - -#endif // TENSORFLOW_CORE_PUBLIC_VERSION_H_ diff --git a/code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h b/code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h index 5452ef63..a0167c3f 100644 --- a/code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h +++ b/code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h @@ -67,9 +67,8 @@ typedef struct { typedef enum { kTfLiteActNone = 0, kTfLiteActRelu, - kTfLiteActReluN1To1, // min(max(-1, x), 1) - kTfLiteActRelu1 = kTfLiteActReluN1To1, // kTfLiteActRelu1 will be deprecated. - kTfLiteActRelu6, // min(max(0, x), 6) + kTfLiteActReluN1To1, // min(max(-1, x), 1) + kTfLiteActRelu6, // min(max(0, x), 6) kTfLiteActTanh, kTfLiteActSignBit, kTfLiteActSigmoid, @@ -88,6 +87,17 @@ typedef struct { int dilation_height_factor; } TfLiteConvParams; +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int stride_depth; + int dilation_width_factor; + int dilation_height_factor; + int dilation_depth_factor; + TfLiteFusedActivation activation; +} TfLiteConv3DParams; + typedef struct { TfLitePadding padding; int stride_width; @@ -214,6 +224,10 @@ typedef struct { typedef struct { bool adj_x; bool adj_y; + // Parameters for BatchMatMul version 4 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. 
+ bool asymmetric_quantize_inputs; } TfLiteBatchMatMulParams; typedef struct { @@ -351,6 +365,7 @@ typedef struct { typedef struct { int axis; + int batch_dims; } TfLiteGatherParams; typedef struct { @@ -474,6 +489,12 @@ typedef struct { int init_subgraph_index; } TfLiteCallOnceParams; +typedef struct { + int table_id; + TfLiteType key_dtype; + TfLiteType value_dtype; +} TfLiteHashtableParams; + #ifdef __cplusplus } // extern "C" #endif // __cplusplus diff --git a/code/components/tfmicro/tensorflow/lite/c/c_api_types.h b/code/components/tfmicro/tensorflow/lite/c/c_api_types.h new file mode 100644 index 00000000..01284778 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/c/c_api_types.h @@ -0,0 +1,95 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file declares types used by the pure C inference API defined in c_api.h, +// some of which are also used in the C++ and C kernel and interpreter APIs. + +#ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_ +#define TENSORFLOW_LITE_C_C_API_TYPES_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// Define TFL_CAPI_EXPORT macro to export a function properly with a shared +// library. +#ifdef SWIG +#define TFL_CAPI_EXPORT +#else +#if defined(_WIN32) +#ifdef TFL_COMPILE_LIBRARY +#define TFL_CAPI_EXPORT __declspec(dllexport) +#else +#define TFL_CAPI_EXPORT __declspec(dllimport) +#endif // TFL_COMPILE_LIBRARY +#else +#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) +#endif // _WIN32 +#endif // SWIG + +typedef enum TfLiteStatus { + kTfLiteOk = 0, + + // Generally referring to an error in the runtime (i.e. interpreter) + kTfLiteError = 1, + + // Generally referring to an error from a TfLiteDelegate itself. + kTfLiteDelegateError = 2, + + // Generally referring to an error in applying a delegate due to + // incompatibility between runtime and delegate, e.g., this error is returned + // when trying to apply a TfLite delegate onto a model graph that's already + // immutable. + kTfLiteApplicationError = 3 +} TfLiteStatus; + +// Types supported by tensor +typedef enum { + kTfLiteNoType = 0, + kTfLiteFloat32 = 1, + kTfLiteInt32 = 2, + kTfLiteUInt8 = 3, + kTfLiteInt64 = 4, + kTfLiteString = 5, + kTfLiteBool = 6, + kTfLiteInt16 = 7, + kTfLiteComplex64 = 8, + kTfLiteInt8 = 9, + kTfLiteFloat16 = 10, + kTfLiteFloat64 = 11, + kTfLiteComplex128 = 12, + kTfLiteUInt64 = 13, + kTfLiteResource = 14, + kTfLiteVariant = 15, + kTfLiteUInt32 = 16, +} TfLiteType; + +// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. +// If per-layer quantization is specified this field will still be populated in +// addition to TfLiteAffineQuantization. +// Parameters for asymmetric quantization. 
Quantized values can be converted +// back to float using: +// real_value = scale * (quantized_value - zero_point) +typedef struct TfLiteQuantizationParams { + float scale; + int32_t zero_point; +} TfLiteQuantizationParams; + +#ifdef __cplusplus +} // extern C +#endif +#endif // TENSORFLOW_LITE_C_C_API_TYPES_H_ diff --git a/code/components/tfmicro/tensorflow/lite/c/common.c b/code/components/tfmicro/tensorflow/lite/c/common.c index 0264f420..aaa98a98 100644 --- a/code/components/tfmicro/tensorflow/lite/c/common.c +++ b/code/components/tfmicro/tensorflow/lite/c/common.c @@ -14,6 +14,8 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/c/c_api_types.h" + #ifndef TF_LITE_STATIC_MEMORY #include #include @@ -197,12 +199,16 @@ const char* TfLiteTypeGetName(TfLiteType type) { return "INT16"; case kTfLiteInt32: return "INT32"; + case kTfLiteUInt32: + return "UINT32"; case kTfLiteUInt8: return "UINT8"; case kTfLiteInt8: return "INT8"; case kTfLiteInt64: return "INT64"; + case kTfLiteUInt64: + return "UINT64"; case kTfLiteBool: return "BOOL"; case kTfLiteComplex64: @@ -215,6 +221,10 @@ const char* TfLiteTypeGetName(TfLiteType type) { return "FLOAT16"; case kTfLiteFloat64: return "FLOAT64"; + case kTfLiteResource: + return "RESOURCE"; + case kTfLiteVariant: + return "VARIANT"; } return "Unknown type"; } diff --git a/code/components/tfmicro/tensorflow/lite/c/common.h b/code/components/tfmicro/tensorflow/lite/c/common.h index e04e1a12..56e0f8d5 100644 --- a/code/components/tfmicro/tensorflow/lite/c/common.h +++ b/code/components/tfmicro/tensorflow/lite/c/common.h @@ -40,26 +40,12 @@ limitations under the License. #include #include +#include "tensorflow/lite/c/c_api_types.h" // IWYU pragma: export + #ifdef __cplusplus extern "C" { #endif // __cplusplus -typedef enum TfLiteStatus { - kTfLiteOk = 0, - - // Generally referring to an error in the runtime (i.e. interpreter) - kTfLiteError = 1, - - // Generally referring to an error from a TfLiteDelegate itself. - kTfLiteDelegateError = 2, - - // Generally referring to an error in applying a delegate due to - // incompatibility between runtime and delegate, e.g., this error is returned - // when trying to apply a TfLite delegate onto a model graph that's already - // immutable. - kTfLiteApplicationError = 3 -} TfLiteStatus; - // The list of external context types known to TF Lite. This list exists solely // to avoid conflicts and to ensure ops can share the external contexts they // need. Access to the external contexts is controlled by one of the @@ -80,7 +66,7 @@ struct TfLiteRegistration; // An external context is a collection of information unrelated to the TF Lite // framework, but useful to a subset of the ops. TF Lite knows very little -// about about the actual contexts, but it keeps a list of them, and is able to +// about the actual contexts, but it keeps a list of them, and is able to // refresh them if configurations like the number of recommended threads // change. 
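The TfLiteQuantizationParams struct that this changeset moves into c_api_types.h documents asymmetric quantization as real_value = scale * (quantized_value - zero_point). Purely as an illustration (the struct and values below are stand-ins, not the TFLite types), dequantizing a single int8 value follows that formula directly:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for TfLiteQuantizationParams: one scale and one zero point
// shared by the whole tensor (per-tensor asymmetric quantization).
struct QuantParams { float scale; std::int32_t zero_point; };

static float Dequantize(std::int8_t q, const QuantParams& p) {
  // real_value = scale * (quantized_value - zero_point)
  return p.scale * static_cast<float>(static_cast<std::int32_t>(q) - p.zero_point);
}

int main() {
  QuantParams p{1.0f / 255.0f, -128};       // hypothetical mapping of [0, 1] onto int8
  std::printf("%f\n", Dequantize(-1, p));   // ~0.498
  return 0;
}
```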
typedef struct TfLiteExternalContext { @@ -98,7 +84,8 @@ typedef struct TfLiteIntArray { // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c #if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ __GNUC_MINOR__ >= 1) || \ - defined(HEXAGON) || (__clang_major__ == 7 && __clang_minor__ == 1) + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) int data[0]; #else int data[]; @@ -254,22 +241,6 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a); } \ } while (0) -// Define TFL_CAPI_EXPORT macro to export a function properly with a shared -// library. -#ifdef SWIG -#define TFL_CAPI_EXPORT -#else -#if defined(_WIN32) -#ifdef TFL_COMPILE_LIBRARY -#define TFL_CAPI_EXPORT __declspec(dllexport) -#else -#define TFL_CAPI_EXPORT __declspec(dllimport) -#endif // TFL_COMPILE_LIBRARY -#else -#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) -#endif // _WIN32 -#endif // SWIG - // Single-precision complex data type compatible with the C99 definition. typedef struct TfLiteComplex64 { float re, im; // real and imaginary parts, respectively. @@ -285,23 +256,6 @@ typedef struct TfLiteFloat16 { uint16_t data; } TfLiteFloat16; -// Types supported by tensor -typedef enum { - kTfLiteNoType = 0, - kTfLiteFloat32 = 1, - kTfLiteInt32 = 2, - kTfLiteUInt8 = 3, - kTfLiteInt64 = 4, - kTfLiteString = 5, - kTfLiteBool = 6, - kTfLiteInt16 = 7, - kTfLiteComplex64 = 8, - kTfLiteInt8 = 9, - kTfLiteFloat16 = 10, - kTfLiteFloat64 = 11, - kTfLiteComplex128 = 12, -} TfLiteType; - // Return the name of a given type, for error reporting purposes. const char* TfLiteTypeGetName(TfLiteType type); @@ -318,22 +272,12 @@ typedef enum TfLiteQuantizationType { typedef struct TfLiteQuantization { // The type of quantization held by params. TfLiteQuantizationType type; - // Holds a reference to one of the quantization param structures specified - // below. + // Holds an optional reference to a quantization param structure. The actual + // type depends on the value of the `type` field (see the comment there for + // the values and corresponding types). void* params; } TfLiteQuantization; -// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. -// If per-layer quantization is specified this field will still be populated in -// addition to TfLiteAffineQuantization. -// Parameters for asymmetric quantization. Quantized values can be converted -// back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteQuantizationParams { - float scale; - int32_t zero_point; -} TfLiteQuantizationParams; - // Parameters for asymmetric quantization across a dimension (i.e per output // channel quantization). // quantized_dimension specifies which dimension the scales and zero_points @@ -353,7 +297,9 @@ typedef union TfLitePtrUnion { * GetTensorData(tensor) instead, otherwise only access .data, as other * members are deprecated. */ int32_t* i32; + uint32_t* u32; int64_t* i64; + uint64_t* u64; float* f; TfLiteFloat16* f16; double* f64; @@ -430,6 +376,17 @@ typedef struct TfLiteCustomAllocation { size_t bytes; } TfLiteCustomAllocation; +// The flags used in `Interpreter::SetCustomAllocationForTensor`. +// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. +typedef enum TfLiteCustomAllocationFlags { + kTfLiteCustomAllocationFlagsNone = 0, + // Skips checking whether allocation.data points to an aligned buffer as + // expected by the TFLite runtime. 
+ // NOTE: Setting this flag can cause crashes when calling Invoke(). + // Use with caution. + kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, +} TfLiteCustomAllocationFlags; + // A tensor in the interpreter system which is a wrapper around a buffer of // data including a dimensionality (or NULL if not currently defined). #ifndef TF_LITE_STATIC_MEMORY @@ -534,7 +491,7 @@ typedef struct TfLiteNode { // WARNING: This is an experimental interface that is subject to change. struct TfLiteDelegate* delegate; } TfLiteNode; -#else // defined(TF_LITE_STATIC_MEMORY)? +#else // defined(TF_LITE_STATIC_MEMORY)? // NOTE: This flag is opt-in only at compile time. // // Specific reduced TfLiteTensor struct for TF Micro runtime. This struct diff --git a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc b/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc index 16118d41..7721b946 100644 --- a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc +++ b/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc @@ -169,6 +169,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseAdd(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_ADD_N: { + return ParseAddN(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_ARG_MAX: { return ParseArgMax(op, error_reporter, allocator, builtin_data); } @@ -181,6 +185,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePool(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_BATCH_MATMUL: { + return ParseBatchMatMul(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_BATCH_TO_SPACE_ND: { + return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_CEIL: { return ParseCeil(op, error_reporter, allocator, builtin_data); } @@ -193,6 +205,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseConv2D(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_CUMSUM: { + return ParseCumsum(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_DEPTH_TO_SPACE: { + return ParseDepthToSpace(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_DEPTHWISE_CONV_2D: { return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data); } @@ -201,14 +221,46 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseDequantize(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_DIV: { + return ParseDiv(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_ELU: { + return ParseElu(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_EXP: { + return ParseExp(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_EXPAND_DIMS: { + return ParseExpandDims(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_FILL: { + return ParseFill(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_FLOOR: { return ParseFloor(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_FLOOR_DIV: { + return ParseFloorDiv(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_FLOOR_MOD: { + return ParseFloorMod(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_FULLY_CONNECTED: { return ParseFullyConnected(op, error_reporter, 
allocator, builtin_data); } + case BuiltinOperator_GATHER_ND: { + return ParseGatherNd(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_GREATER: { return ParseGreater(op, error_reporter, allocator, builtin_data); } @@ -229,6 +281,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePool(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_LEAKY_RELU: { + return ParseLeakyRelu(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_LESS: { return ParseLess(op, error_reporter, allocator, builtin_data); } @@ -257,6 +313,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseLogistic(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_LOG_SOFTMAX: { + return ParseLogSoftmax(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_MAXIMUM: { return ParseMaximum(op, error_reporter, allocator, builtin_data); } @@ -297,6 +357,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePadV2(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_POW: { + return ParsePow(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_PRELU: { return ParsePrelu(op, error_reporter, allocator, builtin_data); } @@ -362,6 +426,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseSoftmax(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SPACE_TO_BATCH_ND: { + return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_SPACE_TO_DEPTH: { + return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_SPLIT: { return ParseSplit(op, error_reporter, allocator, builtin_data); } @@ -378,6 +450,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseSquare(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SQUEEZE: { + return ParseSqueeze(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_STRIDED_SLICE: { return ParseStridedSlice(op, error_reporter, allocator, builtin_data); } @@ -398,23 +474,20 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseTanh(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_TRANSPOSE_CONV: { + return ParseTransposeConv(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_UNPACK: { return ParseUnpack(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_ZEROS_LIKE: { + return ParseZerosLike(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_CAST: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_CastOptions()) { - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->in_data_type(), - ¶ms->in_data_type, - error_reporter)); - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(), - ¶ms->out_data_type, - error_reporter)); - } - *builtin_data = params.release(); - return kTfLiteOk; + return ParseCast(op, error_reporter, allocator, builtin_data); } case BuiltinOperator_LSH_PROJECTION: { auto params = safe_allocator.Allocate(); @@ -483,16 +556,7 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, case BuiltinOperator_HASHTABLE_LOOKUP: // no-op. 
return kTfLiteOk; - case BuiltinOperator_DIV: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_DivOptions()) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } - *builtin_data = params.release(); - return kTfLiteOk; - } + case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); @@ -584,66 +648,9 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_SPACE_TO_DEPTH: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_SpaceToDepthOptions()) { - params->block_size = schema_params->block_size(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_DEPTH_TO_SPACE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_DepthToSpaceOptions()) { - params->block_size = schema_params->block_size(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } + case BuiltinOperator_GATHER: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - params->axis = 0; - if (const auto* gather_params = op->builtin_options_as_GatherOptions()) { - params->axis = gather_params->axis(); - } - - *builtin_data = params.release(); - return kTfLiteOk; - } - - case BuiltinOperator_SQUEEZE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) { - const auto* squeeze_dims = schema_params->squeeze_dims(); - if (squeeze_dims != nullptr) { - TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( - sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims, - error_reporter, "squeeze")); - params->num_squeeze_dims = squeeze_dims->size(); - } else { - params->num_squeeze_dims = 0; - } - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_TRANSPOSE_CONV: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* transpose_conv_params = - op->builtin_options_as_TransposeConvOptions()) { - params->padding = ConvertPadding(transpose_conv_params->padding()); - params->stride_width = transpose_conv_params->stride_w(); - params->stride_height = transpose_conv_params->stride_h(); - } - *builtin_data = params.release(); - return kTfLiteOk; + return ParseGather(op, error_reporter, allocator, builtin_data); } case BuiltinOperator_SPARSE_TO_DENSE: { auto params = safe_allocator.Allocate(); @@ -683,16 +690,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_LEAKY_RELU: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* leaky_relu_params = - op->builtin_options_as_LeakyReluOptions()) { - params->alpha = leaky_relu_params->alpha(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } case BuiltinOperator_MIRROR_PAD: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); @@ -750,17 +747,6 @@ 
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_BATCH_MATMUL: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bmm_params = - op->builtin_options_as_BatchMatMulOptions()) { - params->adj_x = bmm_params->adj_x(); - params->adj_y = bmm_params->adj_y(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } case BuiltinOperator_CALL_ONCE: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); @@ -771,50 +757,59 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_CUMSUM: { - auto params = safe_allocator.Allocate(); + case BuiltinOperator_CONV_3D: { + auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) { - params->exclusive = cumsum_params->exclusive(); - params->reverse = cumsum_params->reverse(); + if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) { + params->padding = ConvertPadding(conv3d_params->padding()); + params->activation = + ConvertActivation(conv3d_params->fused_activation_function()); + params->stride_depth = conv3d_params->stride_d(); + params->stride_height = conv3d_params->stride_h(); + params->stride_width = conv3d_params->stride_w(); + params->dilation_depth_factor = conv3d_params->dilation_d_factor(); + params->dilation_height_factor = conv3d_params->dilation_h_factor(); + params->dilation_width_factor = conv3d_params->dilation_w_factor(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_HASHTABLE: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* hashtable_params = + op->builtin_options_as_HashtableOptions()) { + params->table_id = hashtable_params->table_id(); + TF_LITE_ENSURE_STATUS(ConvertTensorType( + hashtable_params->key_dtype(), ¶ms->key_dtype, error_reporter)); + TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(), + ¶ms->value_dtype, + error_reporter)); } *builtin_data = params.release(); return kTfLiteOk; } // Below are the ops with no builtin_data structure. - case BuiltinOperator_BATCH_TO_SPACE_ND: // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are // ok for now, since there is no call implementation either. 
case BuiltinOperator_CALL: case BuiltinOperator_CONCAT_EMBEDDINGS: case BuiltinOperator_COS: case BuiltinOperator_CUSTOM: - case BuiltinOperator_ELU: case BuiltinOperator_EMBEDDING_LOOKUP: case BuiltinOperator_EQUAL: - case BuiltinOperator_EXP: - case BuiltinOperator_EXPAND_DIMS: - case BuiltinOperator_LOG_SOFTMAX: case BuiltinOperator_MATRIX_DIAG: case BuiltinOperator_MATRIX_SET_DIAG: case BuiltinOperator_RELU_N1_TO_1: case BuiltinOperator_SELECT: case BuiltinOperator_SELECT_V2: case BuiltinOperator_SLICE: - case BuiltinOperator_SPACE_TO_BATCH_ND: case BuiltinOperator_TILE: case BuiltinOperator_TOPK_V2: case BuiltinOperator_TRANSPOSE: - case BuiltinOperator_POW: - case BuiltinOperator_FLOOR_DIV: - case BuiltinOperator_ZEROS_LIKE: - case BuiltinOperator_FILL: - case BuiltinOperator_FLOOR_MOD: case BuiltinOperator_RANGE: case BuiltinOperator_SQUARED_DIFFERENCE: case BuiltinOperator_REVERSE_V2: - case BuiltinOperator_ADD_N: - case BuiltinOperator_GATHER_ND: case BuiltinOperator_WHERE: case BuiltinOperator_RANK: case BuiltinOperator_NON_MAX_SUPPRESSION_V4: @@ -823,6 +818,13 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, case BuiltinOperator_DENSIFY: case BuiltinOperator_SEGMENT_SUM: case BuiltinOperator_BROADCAST_TO: + case BuiltinOperator_RFFT2D: + case BuiltinOperator_IMAG: + case BuiltinOperator_REAL: + case BuiltinOperator_COMPLEX_ABS: + case BuiltinOperator_HASHTABLE_FIND: + case BuiltinOperator_HASHTABLE_IMPORT: + case BuiltinOperator_HASHTABLE_SIZE: return kTfLiteOk; case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES: return kTfLiteError; @@ -850,6 +852,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_INT32: *type = kTfLiteInt32; return kTfLiteOk; + case TensorType_UINT32: + *type = kTfLiteUInt32; + return kTfLiteOk; case TensorType_UINT8: *type = kTfLiteUInt8; return kTfLiteOk; @@ -859,6 +864,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_INT64: *type = kTfLiteInt64; return kTfLiteOk; + case TensorType_UINT64: + *type = kTfLiteUInt64; + return kTfLiteOk; case TensorType_STRING: *type = kTfLiteString; return kTfLiteOk; @@ -871,6 +879,12 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_COMPLEX128: *type = kTfLiteComplex128; return kTfLiteOk; + case TensorType_RESOURCE: + *type = kTfLiteResource; + return kTfLiteOk; + case TensorType_VARIANT: + *type = kTfLiteVariant; + return kTfLiteOk; default: *type = kTfLiteNoType; TF_LITE_REPORT_ERROR(error_reporter, @@ -912,6 +926,11 @@ TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + return kTfLiteOk; +} + TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); @@ -962,6 +981,56 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) { + params->adj_x = bmm_params->adj_x(); + params->adj_y = bmm_params->adj_y(); + params->asymmetric_quantize_inputs = + bmm_params->asymmetric_quantize_inputs(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* schema_params = op->builtin_options_as_CastOptions()) { + TF_LITE_ENSURE_STATUS(ConvertTensorType( + schema_params->in_data_type(), ¶ms->in_data_type, error_reporter)); + TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(), + ¶ms->out_data_type, + error_reporter)); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1030,6 +1099,24 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) { + params->exclusive = cumsum_params->exclusive(); + params->reverse = cumsum_params->reverse(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
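The comment repeated throughout this file ("...used as part of the selective registration for the OpResolver implementation in micro") is the reason every builtin now gets its own ParseXxx entry point: a micro build references only the parse functions for the ops it registers, and the linker can drop the rest. A rough sketch of how a consumer of this component typically wires that up (the op set, arena size and names below are illustrative, not taken from this repository's sources):

```cpp
#include <cstdint>

#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Register only the kernels the model actually uses; everything else,
// including its ParseXxx helper, stays out of the binary.
constexpr int kArenaSize = 100 * 1024;
static uint8_t tensor_arena[kArenaSize];

tflite::MicroInterpreter* MakeInterpreter(const void* model_data) {
  static tflite::MicroErrorReporter error_reporter;
  const tflite::Model* model = tflite::GetModel(model_data);

  static tflite::MicroMutableOpResolver<4> resolver;
  resolver.AddConv2D();
  resolver.AddMaxPool2D();
  resolver.AddFullyConnected();
  resolver.AddSoftmax();

  static tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                              kArenaSize, &error_reporter);
  interpreter.AllocateTensors();
  return &interpreter;
}
```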
@@ -1038,6 +1125,31 @@ TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +TfLiteStatus ParseDepthToSpace(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions(); + if (schema_params != nullptr) { + params->block_size = schema_params->block_size(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseDepthwiseConv2D(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1082,6 +1194,29 @@ TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*, return kTfLiteOk; } +TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* schema_params = op->builtin_options_as_DivOptions()) { + params->activation = + ConvertActivation(schema_params->fused_activation_function()); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1090,6 +1225,30 @@ TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1098,6 +1257,22 @@ TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseFullyConnected(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1144,6 +1319,35 @@ TfLiteStatus ParseFullyConnected(const Operator* op, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + params->axis = 0; + params->batch_dims = 0; + if (const auto* gather_params = op->builtin_options_as_GatherOptions()) { + params->axis = gather_params->axis(); + params->batch_dims = gather_params->batch_dims(); + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
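ParseDiv above only converts the fused activation enum out of the flatbuffer options; the clamping itself is applied by the kernels and follows the ranges documented in builtin_op_data.h earlier in this diff (kTfLiteActReluN1To1 = min(max(-1, x), 1), kTfLiteActRelu6 = min(max(0, x), 6)). A self-contained illustration of that clamping, using a stand-in enum rather than the real TfLiteFusedActivation:

```cpp
#include <algorithm>
#include <cstdio>

// Stand-in for the activation values described in builtin_op_data.h above.
enum class Act { kNone, kRelu, kReluN1To1, kRelu6 };

static float ApplyFusedActivation(float x, Act act) {
  switch (act) {
    case Act::kRelu:      return std::max(0.0f, x);
    case Act::kReluN1To1: return std::min(std::max(-1.0f, x), 1.0f);  // min(max(-1, x), 1)
    case Act::kRelu6:     return std::min(std::max(0.0f, x), 6.0f);   // min(max(0, x), 6)
    case Act::kNone:      break;
  }
  return x;
}

int main() {
  std::printf("%.1f %.1f\n", ApplyFusedActivation(7.5f, Act::kRelu6),
              ApplyFusedActivation(-2.0f, Act::kReluN1To1));  // 6.0 -1.0
  return 0;
}
```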
@@ -1195,6 +1399,22 @@ TfLiteStatus ParseL2Normalization(const Operator* op, return kTfLiteOk; } +TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* leaky_relu_params = + op->builtin_options_as_LeakyReluOptions()) { + params->alpha = leaky_relu_params->alpha(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1251,6 +1471,14 @@ TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1378,6 +1606,14 @@ TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1599,6 +1835,39 @@ TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseSpaceToDepth(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions(); + if (schema_params != nullptr) { + params->block_size = schema_params->block_size(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. 
+ } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); @@ -1647,6 +1916,39 @@ TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + SafeBuiltinDataAllocator safe_allocator(allocator); + + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions(); + + if (schema_params != nullptr) { + const auto* squeeze_dims = schema_params->squeeze_dims(); + if (squeeze_dims != nullptr) { + TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( + sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims, + error_reporter, "squeeze")); + params->num_squeeze_dims = squeeze_dims->size(); + } else { + params->num_squeeze_dims = 0; + } + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1753,6 +2055,40 @@ TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*, void**) { return kTfLiteOk; } +// +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseTransposeConv(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + const TransposeConvOptions* transpose_conv_params = + op->builtin_options_as_TransposeConvOptions(); + if (transpose_conv_params != nullptr) { + params->padding = ConvertPadding(transpose_conv_params->padding()); + params->stride_width = transpose_conv_params->stride_w(); + params->stride_height = transpose_conv_params->stride_h(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. 
+ } + *builtin_data = params.release(); + return kTfLiteOk; +} TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1779,6 +2115,14 @@ TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { diff --git a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h b/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h index 13680997..b4a6883b 100644 --- a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h +++ b/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h @@ -75,15 +75,30 @@ TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBatchToSpaceNd(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseConcatenation(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -95,6 +110,14 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseDepthToSpace(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseDepthwiseConv2D(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -104,17 +127,48 @@ TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseElu(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseEqual(const Operator* op, 
ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseExpandDims(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseFullyConnected(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseGatherNd(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -132,6 +186,10 @@ TfLiteStatus ParseL2Normalization(const Operator* op, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -158,6 +216,10 @@ TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -186,6 +248,9 @@ TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParsePow(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -230,12 +295,25 @@ TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSpaceToBatchNd(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseSpaceToDepth(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); 
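ParseLeakyRelu, declared above and implemented earlier in flatbuffer_conversions.cc, only carries the alpha coefficient through to the kernel; the operator itself is the usual leaky ReLU, y = x for x >= 0 and y = alpha * x otherwise. A minimal, hypothetical illustration (not TFLite code):

```cpp
#include <cstdio>

// Hypothetical helper: alpha would come from TfLiteLeakyReluParams,
// filled in by ParseLeakyRelu; the formula is the standard leaky ReLU.
static float LeakyRelu(float x, float alpha) {
  return x >= 0.0f ? x : alpha * x;
}

int main() {
  std::printf("%.2f %.2f\n", LeakyRelu(3.0f, 0.1f), LeakyRelu(-4.0f, 0.1f));  // 3.00 -0.40
  return 0;
}
```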
+TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -256,9 +334,22 @@ TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseTranspose(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseTransposeConv(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + } // namespace tflite #endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ diff --git a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.cc b/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.cc index c5dffb63..04ebd9a7 100644 --- a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.cc +++ b/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.cc @@ -43,7 +43,9 @@ TfLiteStatus GetRegistrationFromOpCode( if (*registration == nullptr) { TF_LITE_REPORT_ERROR( error_reporter, - "Didn't find op for builtin opcode '%s' version '%d'\n", + "Didn't find op for builtin opcode '%s' version '%d'. " + "An older version of this builtin might be supported. " + "Are you using an old TFLite binary with a newer model?\n", EnumNameBuiltinOperator(builtin_code), version); status = kTfLiteError; } diff --git a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h b/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h index b6a8171d..f43c6ba5 100644 --- a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h +++ b/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ +#include #include #include "tensorflow/lite/c/common.h" diff --git a/code/components/tfmicro/tensorflow/lite/core/api/profiler.h b/code/components/tfmicro/tensorflow/lite/core/api/profiler.h deleted file mode 100644 index 897efbe1..00000000 --- a/code/components/tfmicro/tensorflow/lite/core/api/profiler.h +++ /dev/null @@ -1,194 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_ -#define TENSORFLOW_LITE_CORE_API_PROFILER_H_ - -#include - -namespace tflite { - -// A simple utility for enabling profiled event tracing in TensorFlow Lite. -class Profiler { - public: - // As certain Profiler instance might be only interested in certain event - // types, we define each event type value to allow a Profiler to use - // bitmasking bitwise operations to determine whether an event should be - // recorded or not. - enum class EventType { - // Default event type, the metadata field has no special significance. - DEFAULT = 1, - - // The event is an operator invocation and the event_metadata field is the - // index of operator node. - OPERATOR_INVOKE_EVENT = 2, - - // The event is an invocation for an internal operator of a TFLite delegate. - // The event_metadata field is the index of operator node that's specific to - // the delegate. - DELEGATE_OPERATOR_INVOKE_EVENT = 4, - - // The event is a recording of runtime instrumentation such as the overall - // TFLite runtime status, the TFLite delegate status (if a delegate - // is applied), and the overall model inference latency etc. - // Note, the delegate status and overall status are stored as separate - // event_metadata fields. In particular, the delegate status is encoded - // as DelegateStatus::full_status(). - GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8, - }; - - virtual ~Profiler() {} - - // Signals the beginning of an event and returns a handle to the profile - // event. The `event_metadata1` and `event_metadata2` have different - // interpretations based on the actual Profiler instance and the `event_type`. - // For example, as for the 'SubgraphAwareProfiler' defined in - // lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT, - // `event_metadata1` represents the index of a TFLite node, and - // `event_metadata2` represents the index of the subgraph that this event - // comes from. - virtual uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) = 0; - // Similar w/ the above, but `event_metadata2` defaults to 0. - uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata) { - return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0); - } - - // Signals an end to the specified profile event with 'event_metadata's, This - // is useful when 'event_metadata's are not available when the event begins - // or when one wants to overwrite the 'event_metadata's set at the beginning. - virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1, - int64_t event_metadata2) {} - // Signals an end to the specified profile event. - virtual void EndEvent(uint32_t event_handle) = 0; - - // Appends an event of type 'event_type' with 'tag' and 'event_metadata' - // which started at 'start' and ended at 'end' - // Note: - // In cases were ProfileSimmarizer and tensorflow::StatsCalculator are used - // they assume the value is in "usec", if in any case subclasses - // didn't put usec, then the values are not meaningful. - // TODO karimnosseir: Revisit and make the function more clear. 
- void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata) { - AddEvent(tag, event_type, start, end, event_metadata, - /*event_metadata2*/ 0); - } - - virtual void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata1, - int64_t event_metadata2) {} - - protected: - friend class ScopedProfile; -}; - -// Adds a profile event to `profiler` that begins with the construction -// of the object and ends when the object goes out of scope. -// The lifetime of tag should be at least the lifetime of `profiler`. -// `profiler` may be null, in which case nothing is profiled. -class ScopedProfile { - public: - ScopedProfile(Profiler* profiler, const char* tag, - Profiler::EventType event_type = Profiler::EventType::DEFAULT, - int64_t event_metadata = 0) - : profiler_(profiler), event_handle_(0) { - if (profiler) { - event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata); - } - } - - ~ScopedProfile() { - if (profiler_) { - profiler_->EndEvent(event_handle_); - } - } - - protected: - Profiler* profiler_; - uint32_t event_handle_; -}; - -class ScopedOperatorProfile : public ScopedProfile { - public: - ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index) - : ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT, - static_cast(node_index)) {} -}; - -class ScopedDelegateOperatorProfile : public ScopedProfile { - public: - ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag, - int node_index) - : ScopedProfile(profiler, tag, - Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT, - static_cast(node_index)) {} -}; - -class ScopedRuntimeInstrumentationProfile : public ScopedProfile { - public: - ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag) - : ScopedProfile( - profiler, tag, - Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {} - - void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) { - if (profiler_) { - delegate_status_ = delegate_status; - interpreter_status_ = interpreter_status; - } - } - - ~ScopedRuntimeInstrumentationProfile() { - if (profiler_) { - profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_); - } - } - - private: - int64_t delegate_status_; - int64_t interpreter_status_; -}; - -} // namespace tflite - -#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr -#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr) - -#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \ - tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \ - (profiler), (tag)) - -#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \ - tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \ - (profiler), (tag), (node_index)) - -#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \ - tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \ - _profile_, __COUNTER__)((profiler), (tag), (node_index)) - -#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \ - profiler, tag, delegate_status, interpreter_status) \ - do { \ - if (!profiler) { \ - const auto handle = profiler->BeginEvent( \ - tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \ - delegate_status, interpreter_status); \ - profiler->EndEvent(handle); \ - } \ - } while (false); - -#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_ diff --git 
a/code/components/tfmicro/tensorflow/lite/kernels/internal/common.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/common.h index 662a1864..c433fc88 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/common.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/common.h @@ -178,14 +178,54 @@ inline int32_t MultiplyByQuantizedMultiplier(int64_t x, // - input x is in the range -(1<<47) <= x < (1<<47) assert(quantized_multiplier >= 0); assert(shift >= -31 && shift < 8); + assert(x >= -(static_cast(1) << 47) && + x < (static_cast(1) << 47)); - int32_t reduced_multiplier = (quantized_multiplier + (1 << 15)) >> 16; + int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000) + ? ((quantized_multiplier + (1 << 15)) >> 16) + : 0x7FFF; int total_shift = 15 - shift; x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1)); int32_t result = x >> total_shift; return result; } +#ifdef USE_NEON +// Round uses ARM's rounding shift right. +inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( + int32x4x4_t input_val, int32_t quantized_multiplier, int shift) { + const int left_shift = std::max(shift, 0); + const int right_shift = std::min(shift, 0); + int32x4x4_t result; + + int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier); + int32x4_t left_shift_dup = vdupq_n_s32(left_shift); + int32x4_t right_shift_dup = vdupq_n_s32(right_shift); + + result.val[0] = + vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), + multiplier_dup), + right_shift_dup); + + result.val[1] = + vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), + multiplier_dup), + right_shift_dup); + + result.val[2] = + vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), + multiplier_dup), + right_shift_dup); + + result.val[3] = + vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), + multiplier_dup), + right_shift_dup); + + return result; +} +#endif + template int CountLeadingZeros(T integer_input) { static_assert(std::is_unsigned::value, @@ -261,10 +301,11 @@ inline void gen_lut(double (*func)(double), double min, double max, TfLiteRound(func(min + i * step + half_step) * 32768.0); double midpoint_err = midpoint_interp_val - midpoint_val; double bias = TfLiteRound(midpoint_err / 2.0); - table[i] = std::min(std::max(sample_val - bias, -32768.0), 32767.0); + table[i] = std::min(std::max(sample_val - bias, -32768.0), + 32767.0); } - table[num - 1] = - std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0); + table[num - 1] = std::min( + std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0); } // generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in @@ -289,10 +330,11 @@ inline void gen_lut(float (*func)(float), float min, float max, int16_t* table, TfLiteRound(func(min + i * step + half_step) * 32768.0f); float midpoint_err = midpoint_interp_val - midpoint_val; float bias = TfLiteRound(midpoint_err / 2.0f); - table[i] = std::min(std::max(sample_val - bias, -32768.0f), 32767.0f); + table[i] = std::min(std::max(sample_val - bias, -32768.0f), + 32767.0f); } - table[num - 1] = std::min( - std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f); + table[num - 1] = std::min( + std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f); } // int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/cppmath.h 
b/code/components/tfmicro/tensorflow/lite/kernels/internal/cppmath.h index 24a3aec8..5a32774b 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/cppmath.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/cppmath.h @@ -34,6 +34,7 @@ namespace tflite { } DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); +DECLARE_STD_GLOBAL_SWITCH1(TfLiteExpm1, expm1); } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/portable_tensor.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/portable_tensor.h index 8b0f6d1e..4d71c967 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/portable_tensor.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/portable_tensor.h @@ -15,7 +15,6 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ -#include #include #include "tensorflow/lite/c/common.h" diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc b/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc index cf431cff..ed0fe439 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc @@ -289,7 +289,7 @@ void PreprocessSoftmaxScaling(double beta, double input_scale, input_beta_real_multiplier = (1ll << 31) - 1.0; } #else // TFLITE_EMULATE_FLOAT - const double input_beta_real_multiplier = std::min( + const double input_beta_real_multiplier = std::min( beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0); #endif // TFLITE_EMULATE_FLOAT diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add.h index 5be7ab4d..3da76d88 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add.h @@ -202,14 +202,6 @@ inline void Add(const ArithmeticParams& params, } } -// TODO(jiawen): We can implement BroadcastAdd on buffers of arbitrary -// dimensionality if the runtime code does a single loop over one dimension -// that handles broadcasting as the base case. The code generator would then -// generate max(D1, D2) nested for loops. -// TODO(benoitjacob): BroadcastAdd is intentionally duplicated from -// reference_ops.h. Once an optimized version is implemented and NdArrayDesc -// is no longer referenced in this file, move NdArrayDesc from types.h to -// reference_ops.h. inline void BroadcastAdd4DSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, const float* input1_data, diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add_n.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add_n.h new file mode 100644 index 00000000..fd1e02fe --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add_n.h @@ -0,0 +1,42 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +// T is expected to be either float or int. +template +inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs, + const T* const* input_data, T* output_data) { + // All inputs and output should have the same shape, this is checked during + // Prepare stage. + const size_t size = input_shape.FlatSize(); + for (size_t i = 0; i < size; ++i) { + T x = 0; + for (size_t j = 0; j < num_inputs; ++j) { + x += input_data[j][i]; + } + output_data[i] = x; + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h index e6f34fd7..8154fbf7 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h @@ -15,12 +15,23 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ +#include + #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { namespace reference_ops { +template +std::function GetComparefunction(bool is_arg_max) { + if (is_arg_max) { + return std::greater(); + } else { + return std::less(); + } +} + template void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, const T3* input2_data, const RuntimeShape& output_shape, @@ -62,6 +73,15 @@ void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, } } } + +template +void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, + const T3* input2_data, const RuntimeShape& output_shape, + T2* output_data, const bool is_arg_max) { + ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data, + GetComparefunction(is_arg_max)); +} + } // namespace reference_ops } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h new file mode 100644 index 00000000..cda46a26 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h @@ -0,0 +1,101 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_ + +#include + +#include "ruy/profiler/instrumentation.h" // from @ruy +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +// TODO(b/135760455): Move this method anonymous namespace in a cc file. +inline RuntimeShape ExtendShapeBatchToSpace(const RuntimeShape& shape) { + if (shape.DimensionsCount() == 4) { + return shape; + } + RuntimeShape new_shape(4, 1); + new_shape.SetDim(0, shape.Dims(0)); + new_shape.SetDim(1, shape.Dims(1)); + new_shape.SetDim(3, shape.Dims(2)); + return new_shape; +} + +template +inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const int32_t* block_shape_data, + const RuntimeShape& unextended_input3_shape, + const int32_t* crops_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + ruy::profiler::ScopeLabel label("BatchToSpaceND"); + TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3); + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(), + unextended_output_shape.DimensionsCount()); + + const RuntimeShape input1_shape = + ExtendShapeBatchToSpace(unextended_input1_shape); + const RuntimeShape output_shape = + ExtendShapeBatchToSpace(unextended_output_shape); + + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch_size = output_shape.Dims(0); + + const int depth = input1_shape.Dims(3); + const int input_width = input1_shape.Dims(2); + const int input_height = input1_shape.Dims(1); + const int input_batch_size = input1_shape.Dims(0); + + const int block_shape_height = block_shape_data[0]; + const int block_shape_width = + unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1; + const int crops_top = crops_data[0]; + const int crops_left = + unextended_input1_shape.DimensionsCount() == 4 ? 
crops_data[2] : 0; + for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) { + const int out_batch = in_batch % output_batch_size; + const int spatial_offset = in_batch / output_batch_size; + for (int in_h = 0; in_h < input_height; ++in_h) { + const int out_h = in_h * block_shape_height + + spatial_offset / block_shape_width - crops_top; + if (out_h < 0 || out_h >= output_height) { + continue; + } + for (int in_w = 0; in_w < input_width; ++in_w) { + const int out_w = in_w * block_shape_width + + spatial_offset % block_shape_width - crops_left; + + if (out_w < 0 || out_w >= output_width) { + continue; + } + T* out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0); + const T* in = + input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0); + memcpy(out, in, depth * sizeof(T)); + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h index 51d9e2b7..1711940c 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h @@ -23,9 +23,6 @@ namespace tflite { namespace reference_ops { -// TODO(ycling): Refactoring. Remove BroadcastLogical and use the more -// generalized and efficient BroadcastBinaryFunction. -// // Also appears to duplicate MinimumMaximum. // // R: Result type. T1: Input 1 type. T2: Input 2 type. @@ -63,7 +60,6 @@ inline void BroadcastBinaryFunction4DSlow( } // R: Result type. T1: Input 1 type. T2: Input 2 type. -// TODO(renjieliu): Refactor other binary functions to use this one. template inline void BinaryFunction(const RuntimeShape& input1_shape, const T1* input1_data, diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/concatenation.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/concatenation.h index 25959793..998bb093 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/concatenation.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/concatenation.h @@ -68,8 +68,7 @@ inline void Concatenation(const ConcatenationParams& params, } } -// TODO(prabhumk): This is the same as the optimized implementation. -// TODO(prabhumk): The quantized implementation of concatentation isn't fully +// TODO(b/174275780): The quantized implementation of concatentation isn't fully // quantized as it takes scale as a floating point value. This should be fixed // when optimizng this routine further. inline void ConcatenationWithScaling(const ConcatenationParams& params, diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/conv.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/conv.h index b912ac1b..5a6369d8 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/conv.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/conv.h @@ -15,16 +15,13 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ -#include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/internal/common.h" - - +#include "tensorflow/lite/kernels/internal/types.h" namespace tflite { namespace reference_ops { - inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& filter_shape, const float* filter_data, const RuntimeShape& bias_shape, @@ -108,8 +105,8 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, uint8_t* output_data, const RuntimeShape& im2col_shape, uint8_t* im2col_data, void* cpu_backend_context) { (void)cpu_backend_context; // only used in optimized code. - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. const int stride_width = params.stride_width; const int stride_height = params.stride_height; const int dilation_width_factor = params.dilation_width_factor; diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/div.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/div.h new file mode 100644 index 00000000..269d27a0 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/div.h @@ -0,0 +1,239 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_ + +#include + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { + +namespace reference_ops { + +template +inline void DivCheckArithmeticParams(const ArithmeticParams& params) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + // Input offset is negative input zero point. Activation tensors are + // asymmetric quantized so they span the full int8 range. + constexpr int32_t max_value = + static_cast(std::numeric_limits::max()); + TFLITE_DCHECK_GE(params.input1_offset, -max_value); + TFLITE_DCHECK_LE(params.input1_offset, max_value); + TFLITE_DCHECK_GE(params.input2_offset, -max_value); + TFLITE_DCHECK_LE(params.input2_offset, max_value); + TFLITE_DCHECK_GE(params.output_offset, -max_value); + TFLITE_DCHECK_LE(params.output_offset, max_value); +} + +// Element-wise div that can often be used for inner loop of broadcast Div as +// well as the non-broadcast Div. 
+template +inline void DivElementwise(int size, const ArithmeticParams& params, + const T* input1_data, const T* input2_data, + T* output_data) { + DivCheckArithmeticParams(params); + + for (int i = 0; i < size; ++i) { + const int32_t input1_val = params.input1_offset + input1_data[i]; + const int32_t input2_val = params.input2_offset + input2_data[i]; + TFLITE_DCHECK_NE(input2_val, 0); + int recip_shift; + const int32_t input2_inv = + (input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift) + : -GetReciprocal(-input2_val, 31, &recip_shift); + const int headroom = CountLeadingSignBits(input1_val); + const int32_t unscaled_quotient = + MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv, + headroom); + const int total_shift = params.output_shift - recip_shift - headroom; + const int32_t unclamped_result = + params.output_offset + + MultiplyByQuantizedMultiplierSmallerThanOneExp( + unscaled_quotient, params.output_multiplier, total_shift); + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, unclamped_result)); + output_data[i] = static_cast(clamped_output); + } +} + +inline void Div(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const uint8_t* input1_data, + const RuntimeShape& input2_shape, const uint8_t* input2_data, + const RuntimeShape& output_shape, uint8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + DivElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +inline void Div(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const int8_t* input1_data, + const RuntimeShape& input2_shape, const int8_t* input2_data, + const RuntimeShape& output_shape, int8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + DivElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +template +inline void BroadcastDivSlowQuantized( + const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape, + const T* input1_data, const RuntimeShape& unextended_input2_shape, + const T* input2_data, const RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); + + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDesc output_desc; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), + &output_desc); + + DivCheckArithmeticParams(params); + + auto div_func = [&](int indexes[N]) { + const int32_t input1_val = + params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; + const int32_t input2_val = + params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; + TFLITE_DCHECK_NE(input2_val, 0); + int recip_shift; + const int32_t input2_inv = + (input2_val > 0) ? 
GetReciprocal(input2_val, 31, &recip_shift) + : -GetReciprocal(-input2_val, 31, &recip_shift); + const int headroom = CountLeadingSignBits(input1_val); + const int32_t unscaled_quotient = + MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv, + headroom); + const int total_shift = params.output_shift - recip_shift - headroom; + const int32_t unclamped_result = + params.output_offset + + MultiplyByQuantizedMultiplierSmallerThanOneExp( + unscaled_quotient, params.output_multiplier, total_shift); + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, unclamped_result)); + output_data[SubscriptToIndex(output_desc, indexes)] = + static_cast(clamped_output); + }; + NDOpsHelper(output_desc, div_func); +} + +template +inline void BroadcastDivSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const uint8_t* input1_data, + const RuntimeShape& unextended_input2_shape, + const uint8_t* input2_data, + const RuntimeShape& unextended_output_shape, + uint8_t* output_data) { + BroadcastDivSlowQuantized( + params, unextended_input1_shape, input1_data, unextended_input2_shape, + input2_data, unextended_output_shape, output_data); +} + +template +inline void BroadcastDivSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const int8_t* input1_data, + const RuntimeShape& unextended_input2_shape, + const int8_t* input2_data, + const RuntimeShape& unextended_output_shape, + int8_t* output_data) { + BroadcastDivSlowQuantized( + params, unextended_input1_shape, input1_data, unextended_input2_shape, + input2_data, unextended_output_shape, output_data); +} + +// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary +// dimensionality if the runtime code does a single loop over one dimension +// that handles broadcasting as the base case. The code generator would then +// generate max(D1, D2) nested for loops. +template +void BroadcastDivSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const T* input2_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + T output_activation_min; + T output_activation_max; + GetActivationParams(params, &output_activation_min, &output_activation_max); + + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); + + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDesc output_desc; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), + &output_desc); + + // In Tensorflow, the dimensions are canonically named (batch_number, row, + // col, channel), with extents (batches, height, width, depth), with the + // trailing dimension changing most rapidly (channels has the smallest + // stride, typically 1 element). + // + // In generated C code, we store arrays with the dimensions reversed. The + // first dimension has smallest stride. 
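  // Worked example of the canonical layout mentioned above (illustrative
  // aside, not upstream code): with the trailing (channel) dimension
  // contiguous, the flat offset of element (b, h, w, c) in an (N, H, W, C)
  // tensor is
  //   offset(b, h, w, c) = ((b * H + h) * W + w) * C + c,
  // i.e. strides of (H*W*C, W*C, C, 1). For a (2, 3, 4, 5) tensor the element
  // at (1, 2, 3, 4) lands at ((1*3 + 2)*4 + 3)*5 + 4 = 119, the last of the
  // 120 entries; SubscriptToIndex applies the same idea using the strides
  // stored in NdArrayDesc.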
+ + auto div_func = [&](int indexes[N]) { + output_data[SubscriptToIndex(output_desc, indexes)] = + ActivationFunctionWithMinMax( + input1_data[SubscriptToIndex(desc1, indexes)] / + input2_data[SubscriptToIndex(desc2, indexes)], + output_activation_min, output_activation_max); + }; + NDOpsHelper(output_desc, div_func); +} + +template +inline void Div(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + T output_activation_min; + T output_activation_max; + GetActivationParams(params, &output_activation_min, &output_activation_max); + + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + output_data[i] = ActivationFunctionWithMinMax( + input1_data[i] / input2_data[i], output_activation_min, + output_activation_max); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/elu.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/elu.h new file mode 100644 index 00000000..3dc93589 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/elu.h @@ -0,0 +1,37 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_ + +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +inline void Elu(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + output_data[i] = val < 0.0f ? TfLiteExpm1(val) : val; + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/exp.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/exp.h new file mode 100644 index 00000000..134ee13f --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/exp.h @@ -0,0 +1,38 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_ + +#include + +#include "ruy/profiler/instrumentation.h" // from @ruy +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +inline void Exp(const T* input_data, const size_t num_elements, + T* output_data) { + ruy::profiler::ScopeLabel label("Exp"); + for (size_t idx = 0; idx < num_elements; ++idx) { + output_data[idx] = std::exp(input_data[idx]); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fill.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fill.h new file mode 100644 index 00000000..16630e61 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fill.h @@ -0,0 +1,38 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_ + +#include + +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +void Fill(const RuntimeShape& value_shape, const T* value_data, + const RuntimeShape& output_shape, T* output_data) { + TFLITE_DCHECK_EQ(value_shape.DimensionsCount(), 0); + const int flat_size = output_shape.FlatSize(); + for (int i = 0; i < flat_size; ++i) { + output_data[i] = *value_data; + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h index 39a9cd02..d5ad9d67 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h @@ -31,7 +31,7 @@ inline void FullyConnected( float* output_data) { const float output_activation_min = params.float_activation_min; const float output_activation_max = params.float_activation_max; - // TODO(benoitjacob): This really should be: + // TODO(b/62193649): This really should be: // const int batches = ArraySize(output_dims, 1); // but the current --variable_batch hack consists in overwriting the 3rd // dimension with the runtime batch size, as we don't keep track for each @@ -76,7 +76,7 @@ inline void FullyConnected( TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - // TODO(benoitjacob): This really should be: + // TODO(b/62193649): This really should be: // const int batches = ArraySize(output_dims, 1); // but the current --variable_batch hack consists in overwriting the 3rd // dimension with the runtime batch size, as we don't keep track for each @@ -123,7 +123,7 @@ inline void FullyConnected( TFLITE_DCHECK_LE(output_activation_min, output_activation_max); TFLITE_DCHECK_EQ(output_offset, 0); - // TODO(benoitjacob): This really should be: + // TODO(b/62193649): This really should be: // const int batches = ArraySize(output_dims, 1); // but the current --variable_batch hack consists in overwriting the 3rd // dimension with the runtime batch size, as we don't keep track for each @@ -176,7 +176,7 @@ inline void ShuffledFullyConnected( TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1); TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); - // TODO(benoitjacob): This really should be: + // TODO(b/62193649): This really should be: // const int batches = ArraySize(output_dims, 1); // but the current --variable_batch hack consists in overwriting the 3rd // dimension with the runtime batch size, as we don't keep track for each diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h index 2af6f373..10bee904 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h @@ -34,55 +34,24 @@ inline void CheckArithmeticParams(const ArithmeticParams& params) { 
TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits::max()); } -// Element-wise add that can often be used for inner loop of broadcast add as -// well as the non-broadcast add. -inline void AddElementwise(int size, const ArithmeticParams& params, - const int8_t* input1_data, const int8_t* input2_data, - int8_t* output_data) { +inline void ElementWise( + int size, const ArithmeticParams& params, const int8_t* input1_data, + const int8_t* input2_data, int8_t* output_data, + void (*check_arithmetic_params)(const ArithmeticParams&), + int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { CheckArithmeticParams(params); - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); + output_data[i] = binary_func(input1_data[i], input2_data[i], params); } } -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const int8_t* input1_data, - const RuntimeShape& input2_shape, const int8_t* input2_data, - const RuntimeShape& output_shape, int8_t* output_data) { - CheckArithmeticParams(params); - - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - AddElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int8_t* input1_data, - const RuntimeShape& input2_shape, - const int8_t* input2_data, - const RuntimeShape& output_shape, - int8_t* output_data) { +inline void BroadcastBinaryFunction4DSlow( + const ArithmeticParams& params, const RuntimeShape& input1_shape, + const int8_t* input1_data, const RuntimeShape& input2_shape, + const int8_t* input2_data, const RuntimeShape& output_shape, + int8_t* output_data, + void (*check_arithmetic_params)(const ArithmeticParams&), + int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { NdArrayDesc<4> desc1; NdArrayDesc<4> desc2; NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, @@ -105,40 +74,70 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, for (int y = 0; y < extended_output_shape.Dims(1); ++y) { for (int x = 0; x < extended_output_shape.Dims(2); ++x) { for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t shifted_input1_val = - input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = - 
input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, - params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, - params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); + output_data[Offset(extended_output_shape, b, y, x, c)] = binary_func( + input1_data[SubscriptToIndex(desc1, b, y, x, c)], + input2_data[SubscriptToIndex(desc2, b, y, x, c)], params); } } } } } +inline int8_t AddFunc(int8_t x, int8_t y, const ArithmeticParams& params) { + const int32_t input1_val = params.input1_offset + x; + const int32_t input2_val = params.input2_offset + y; + const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); + const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); + const int32_t scaled_input1_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input1_val, params.input1_multiplier, params.input1_shift); + const int32_t scaled_input2_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input2_val, params.input2_multiplier, params.input2_shift); + const int32_t raw_sum = scaled_input1_val + scaled_input2_val; + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + raw_sum, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + return static_cast(clamped_output); +} + +// Element-wise add that can often be used for inner loop of broadcast add as +// well as the non-broadcast add. 
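// Illustrative sketch (not part of this refactor; the *Sketch names are
// placeholders): the point of factoring the loops into ElementWise and
// BroadcastBinaryFunction4DSlow is that any int8 binary op can reuse them by
// supplying a per-element function of type
// int8_t(int8_t, int8_t, const ArithmeticParams&). For example, a rescaling
// subtract could plug in like this:
inline int8_t SubFuncSketch(int8_t x, int8_t y, const ArithmeticParams& params) {
  const int32_t input1_val = params.input1_offset + x;
  const int32_t input2_val = params.input2_offset + y;
  const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
  const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
  const int32_t scaled_input1_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input1_val, params.input1_multiplier, params.input1_shift);
  const int32_t scaled_input2_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input2_val, params.input2_multiplier, params.input2_shift);
  // Identical to AddFunc above except that the rescaled operands are
  // subtracted instead of summed before the output requantization.
  const int32_t raw_diff = scaled_input1_val - scaled_input2_val;
  const int32_t raw_output =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          raw_diff, params.output_multiplier, params.output_shift) +
      params.output_offset;
  const int32_t clamped_output =
      std::min(params.quantized_activation_max,
               std::max(params.quantized_activation_min, raw_output));
  return static_cast<int8_t>(clamped_output);
}

inline void SubElementwiseSketch(int size, const ArithmeticParams& params,
                                 const int8_t* input1_data,
                                 const int8_t* input2_data,
                                 int8_t* output_data) {
  ElementWise(size, params, input1_data, input2_data, output_data,
              CheckArithmeticParams, SubFuncSketch);
}
// The AddElementwise wrapper that follows is exactly this pattern with AddFunc.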
+inline void AddElementwise(int size, const ArithmeticParams& params, + const int8_t* input1_data, const int8_t* input2_data, + int8_t* output_data) { + ElementWise(size, params, input1_data, input2_data, output_data, + CheckArithmeticParams, AddFunc); +} + +inline void Add(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const int8_t* input1_data, + const RuntimeShape& input2_shape, const int8_t* input2_data, + const RuntimeShape& output_shape, int8_t* output_data) { + CheckArithmeticParams(params); + + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + AddElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +inline void BroadcastAdd4DSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const int8_t* input1_data, + const RuntimeShape& input2_shape, + const int8_t* input2_data, + const RuntimeShape& output_shape, + int8_t* output_data) { + BroadcastBinaryFunction4DSlow(params, input1_shape, input1_data, input2_shape, + input2_data, output_shape, output_data, + CheckArithmeticParams, AddFunc); +} + } // namespace reference_integer_ops } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h index 3e9cd0ca..3a4164d3 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h @@ -101,7 +101,7 @@ inline void ConvPerChannel( // long as the filter size (filter_y * filter_x * in_channel) // does not exceed 2^16, which is the case in all the models // we have seen so far. - // TODO(jianlijianli): Add a check to make sure the + // TODO(b/174275578): Add a check to make sure the // accumulator depth is smaller than 2^16. acc += filter_val * (input_val + input_offset); } diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h index 6f54e47f..f0ca09c7 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h @@ -95,7 +95,7 @@ inline void DepthwiseConvPerChannel( // long as the filter size (filter_y * filter_x * in_channel) // does not exceed 2^16, which is the case in all the models // we have seen so far. - // TODO(jianlijianli): Add a check to make sure the + // TODO(b/174275578): Add a check to make sure the // accumulator depth is smaller than 2^16. 
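          // Illustrative aside on the 2^16 bound above: filter_val is an int8
          // value, so |filter_val| <= 2^7, and (input_val + input_offset) is
          // an int8 value re-centred by a zero point, so its magnitude stays
          // below 2^8. Each product is therefore below 2^15 in magnitude, and
          // accumulating at most 2^16 of them keeps |acc| below 2^31, which
          // still fits in the int32_t accumulator.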
acc += filter_val * (input_val + input_offset); } diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h index d1a15bd9..95697ec9 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h @@ -58,23 +58,36 @@ inline void Logistic(int32_t input_zero_point, int32_t input_range_radius, } } -inline void Logistic(int32_t input_multiplier, int32_t input_size, - const int16_t* ptr_input_data, int16_t* ptr_output_data) { +inline void Logistic(int32_t input_multiplier, int32_t input_left_shift, + int32_t input_size, const int16_t* ptr_input_data, + int16_t* ptr_output_data) { // We use the LUT for sigmoid and take into account, that // tanh(x) = 2*sigmoid(2*x) - 1 - int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1; + // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. + // In case of general parameter scale, multiplier 3 is taken into account + // in TanhPrepare function and it is included in + // input_multiplier already. + + TFLITE_DCHECK_GE(input_left_shift, 0); + if (input_multiplier == 0) { // power of two case + input_multiplier = 3 << input_left_shift; + input_left_shift = 0; + } + + int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0; for (int i = 0; i < input_size; ++i, ptr_input_data++, ptr_output_data++) { - int32_t input_data = (*ptr_input_data) * input_data_mul; + int32_t input_data = + ((*ptr_input_data) * input_multiplier + round) >> input_left_shift; - // Scale by 3/4 to expand range [-8,8]->[-10.7,10.7] and - // we do interpolation on unsigned values. - uint32_t abs_input_data = 3 * abs(input_data); + // We do interpolation on unsigned values. + uint32_t abs_input_data = abs(input_data); // We divide by 2 power of 9, because // we need to divide by 2 in power of 7 for // the input conversion + 1/4 from the scale above. + // Define uh as uint32_t type not to make this function overflow. uint32_t uh = abs_input_data >> 9; uint32_t result; diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h index 81ff34fe..63e40936 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h @@ -65,19 +65,25 @@ inline void Tanh(int32_t input_multiplier, int32_t input_left_shift, // We use the LUT for sigmoid and take into account, that // tanh(x) = 2*sigmoid(2*x) - 1 - int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1; + // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. + // In case of general parameter scale, multiplier 3 is taken into account + // in TanhPrepare function and it is included in + // input_multiplier already. + + if (input_multiplier == 0) { // power of two case + input_multiplier = 3 << input_left_shift; + input_left_shift = 0; + } + + int32_t round = (input_left_shift > 0) ? 
1 << (input_left_shift - 1) : 0; int flat_size = MatchingFlatSize(input_shape, output_shape); for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) { - int32_t input_data = (*ptr_input_data) * input_data_mul; + int32_t input_data = + ((*ptr_input_data) * input_multiplier + round) >> input_left_shift; - if (input_left_shift == 1) { - input_data <<= 1; - } - - // Scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. - uint32_t abs_input_data = 3 * abs(input_data); + uint32_t abs_input_data = abs(input_data); uint32_t uh = abs_input_data >> 8; int32_t result; diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h new file mode 100644 index 00000000..284c0f21 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h @@ -0,0 +1,221 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_integer_ops { + +// Fixed-point per-channel-quantization transpose convolution reference kernel. +inline void TransposeConv( + const ConvParams& params, const int32_t* output_multiplier, + const int32_t* output_shift, const RuntimeShape& input_shape, + const int8_t* input_data, const RuntimeShape& filter_shape, + const int8_t* filter_data, const RuntimeShape& bias_shape, + const int32_t* bias_data, const RuntimeShape& output_shape, + int8_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, + int32_t* scratch_buffer) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. 
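  // Summary of the scatter-based approach implemented below (illustrative
  // aside): instead of gathering, each input pixel is scattered into the
  // int32 scratch buffer at every output location it influences, i.e.
  // out_x = in_x * stride_width - pad_width + filter_x (and likewise for y).
  // For example, with stride 2, pad 1 and a 3x3 filter, input column in_x = 4
  // contributes to output columns 7, 8 and 9. A second pass then adds the
  // bias, requantizes each channel with MultiplyByQuantizedMultiplier, adds
  // the output offset and clamps to the int8 range.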
+ + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int32_t input_offset = params.input_offset; + const int32_t output_offset = params.output_offset; + const int32_t output_activation_min = std::numeric_limits::min(); + const int32_t output_activation_max = std::numeric_limits::max(); + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + + const int num_elements = output_shape.FlatSize(); + // We need to initialize scratch_buffer to all 0s, as we apply the same + // 'scatter' based trick as in float version. + memset(scratch_buffer, 0, num_elements * sizeof(int32_t)); + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence. + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location. + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds. 
+ if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + const int8_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + const int8_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + (input_value + input_offset) * filter_value; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) { + acc += bias_data[out_channel]; + } + acc = MultiplyByQuantizedMultiplier( + acc, output_multiplier[out_channel], output_shift[out_channel]); + acc += output_offset; + acc = std::max(acc, output_activation_min); + acc = std::min(acc, output_activation_max); + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + static_cast(acc); + } + } + } + } +} + +// int16_t input (zero_point=0), int8_t filter, int64 accumulator +inline void TransposeConv( + const ConvParams& params, const int32_t* output_multiplier, + const int32_t* output_shift, const RuntimeShape& input_shape, + const int16_t* input_data, const RuntimeShape& filter_shape, + const int8_t* filter_data, const RuntimeShape& bias_shape, + const std::int64_t* bias_data, const RuntimeShape& output_shape, + int16_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, + std::int64_t* scratch_buffer) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. + + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int32_t output_activation_min = std::numeric_limits::min(); + const int32_t output_activation_max = std::numeric_limits::max(); + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + + const int num_elements = output_shape.FlatSize(); + // We need to initialize scratch_buffer to all 0s, as we apply the same + // 'scatter' based trick as in float version. + memset(scratch_buffer, 0, num_elements * sizeof(std::int64_t)); + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence. 
+ const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location. + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds. + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + const int32_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + const int32_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + input_value * filter_value; + } + } + } + } + } + } + } + } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + std::int64_t acc = scratch_buffer[Offset(output_shape, batch, out_y, + out_x, out_channel)]; + if (bias_data) { + acc += bias_data[out_channel]; + } + int32_t scaled_acc = MultiplyByQuantizedMultiplier( + acc, output_multiplier[out_channel], output_shift[out_channel]); + scaled_acc = std::max(scaled_acc, output_activation_min); + scaled_acc = std::min(scaled_acc, output_activation_max); + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + static_cast(scaled_acc); + } + } + } + } +} + +} // namespace reference_integer_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/leaky_relu.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/leaky_relu.h new file mode 100644 index 00000000..06f691ab --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/leaky_relu.h @@ -0,0 +1,69 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_ + +#include +#include + +#include "tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_ops { + +inline void LeakyRelu(const tflite::LeakyReluParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + // Note that alpha might be > 1 or < 0, so we don't use std::max here. + output_data[i] = val > 0 ? 
val : val * params.alpha; + } +} + +template +inline void QuantizeLeakyRelu(const LeakyReluParams& params, + const RuntimeShape& input_shape, + const T* input_data, + const RuntimeShape& output_shape, + T* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + static const int32_t quantized_min = std::numeric_limits::min(); + static const int32_t quantized_max = std::numeric_limits::max(); + for (int i = 0; i < flat_size; ++i) { + const int32_t input_value = input_data[i] - params.input_offset; + int32_t unclamped_output; + if (input_value >= 0) { + unclamped_output = params.output_offset + + MultiplyByQuantizedMultiplier( + input_value, params.output_multiplier_identity, + params.output_shift_identity); + } else { + unclamped_output = params.output_offset + + MultiplyByQuantizedMultiplier( + input_value, params.output_multiplier_alpha, + params.output_shift_alpha); + } + const T clamped_output = + std::min(quantized_max, std::max(quantized_min, unclamped_output)); + output_data[i] = static_cast(clamped_output); + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h index 32e32ed0..d1e67785 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h @@ -45,6 +45,7 @@ inline void Requantize(const input_type* input_data, int32_t size, for (int i = 0; i < size; ++i) { output_data[i] = input_data[i] ^ 0x80; } + return; } } static constexpr int32_t kMinOutput = std::numeric_limits::min(); diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h new file mode 100644 index 00000000..7f844152 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h @@ -0,0 +1,109 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_ + +#include + +#include "ruy/profiler/instrumentation.h" // from @ruy +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +// TODO(b/135760455): Move this method anonymous namespace in a cc file. 
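// --- Illustrative note, not part of the TFLite sources in this patch --------
// In the SpaceToBatchND reference kernel below, the output batch index encodes
// both the original batch and the block offsets:
//   input_batch = out_b % input_batch_size,
//   shift_w     = (out_b / input_batch_size) % block_shape_width,
//   shift_h     = (out_b / input_batch_size) / block_shape_width.
// For example, with a 1x4x4xC input, block_shape = [2, 2] and no padding, the
// output is 4x2x2xC; output batch 2 (shift_h = 1, shift_w = 0) at position
// (out_h = 0, out_w = 1) copies input pixel (0 * 2 + 1, 1 * 2 + 0) = (1, 2).
// ----------------------------------------------------------------------------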
+inline RuntimeShape ExtendShapeSpaceToBatch(const RuntimeShape& shape) { + if (shape.DimensionsCount() == 4) { + return shape; + } + RuntimeShape new_shape(4, 1); + new_shape.SetDim(0, shape.Dims(0)); + new_shape.SetDim(1, shape.Dims(1)); + new_shape.SetDim(3, shape.Dims(2)); + return new_shape; +} + +template +inline void SpaceToBatchND(const SpaceToBatchParams& params, + const RuntimeShape& unextended_input1_shape, + const T* input1_data, + const RuntimeShape& unextended_input2_shape, + const int32_t* block_shape_data, + const RuntimeShape& unextended_input3_shape, + const int32_t* paddings_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + ruy::profiler::ScopeLabel label("SpaceToBatchND"); + TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3); + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(), + unextended_output_shape.DimensionsCount()); + + // Extends the input/output shape from 3D to 4D if needed, NHC -> NH1C. + const RuntimeShape input1_shape = + ExtendShapeSpaceToBatch(unextended_input1_shape); + const RuntimeShape output_shape = + ExtendShapeSpaceToBatch(unextended_output_shape); + + const int depth = input1_shape.Dims(3); + const int input_width = input1_shape.Dims(2); + const int input_height = input1_shape.Dims(1); + const int input_batch_size = input1_shape.Dims(0); + + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch_size = output_shape.Dims(0); + + const int block_shape_height = block_shape_data[0]; + const int block_shape_width = + unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1; + const int padding_top = paddings_data[0]; + const int padding_left = + unextended_input1_shape.DimensionsCount() == 4 ? paddings_data[2] : 0; + + // For uint8 quantized, the correct padding "zero value" is the output offset. + const int32_t pad_value = params.output_offset; + for (int out_b = 0; out_b < output_batch_size; ++out_b) { + int input_batch = out_b % input_batch_size; + int shift_w = (out_b / input_batch_size) % block_shape_width; + int shift_h = (out_b / input_batch_size) / block_shape_width; + for (int out_h = 0; out_h < output_height; ++out_h) { + for (int out_w = 0; out_w < output_width; ++out_w) { + T* out = output_data + Offset(output_shape, out_b, out_h, out_w, 0); + if (out_h * block_shape_height + shift_h < padding_top || + out_h * block_shape_height + shift_h >= + padding_top + input_height || + out_w * block_shape_width + shift_w < padding_left || + out_w * block_shape_width + shift_w >= padding_left + input_width) { + // This may not execute correctly when pad_value != 0 and T != uint8. 
+ memset(out, pad_value, depth * sizeof(T)); + } else { + const T* in = + input1_data + + Offset(input1_shape, input_batch, + (out_h * block_shape_height + shift_h) - padding_top, + (out_w * block_shape_width + shift_w) - padding_left, 0); + memcpy(out, in, depth * sizeof(T)); + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_BATCH_ND_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h index 8b6f0c13..40dc2e91 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h @@ -15,23 +15,28 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ +#include "ruy/profiler/instrumentation.h" // from @ruy #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/kernels/internal/portable_tensor.h" #include "tensorflow/lite/kernels/internal/strided_slice_logic.h" #include "tensorflow/lite/kernels/internal/types.h" namespace tflite { namespace reference_ops { + template inline void StridedSlice(const tflite::StridedSliceParams& op_params, const RuntimeShape& unextended_input_shape, - const T* input_data, const RuntimeShape& unextended_output_shape, - T* output_data) { + SequentialTensorWriter* writer) { using strided_slice::LoopCondition; using strided_slice::StartForAxis; using strided_slice::StopForAxis; + + ruy::profiler::ScopeLabel label("StridedSlice"); + // Note that the output_shape is not used herein. 
tflite::StridedSliceParams params_copy = op_params; @@ -57,7 +62,6 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params, const int start_4 = StartForAxis(params_copy, input_shape, 4); const int stop_4 = StopForAxis(params_copy, input_shape, 4, start_4); - T* out_ptr = output_data; for (int offset_0 = start_0 * input_shape.Dims(1), end_0 = stop_0 * input_shape.Dims(1), step_0 = params_copy.strides[0] * input_shape.Dims(1); @@ -81,13 +85,36 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params, for (int offset_4 = offset_3 + start_4, end_4 = offset_3 + stop_4; !LoopCondition(offset_4, end_4, params_copy.strides[4]); offset_4 += params_copy.strides[4]) { - *out_ptr++ = input_data[offset_4]; + writer->Write(offset_4); } } } } } } + +template +inline void StridedSlice(const tflite::StridedSliceParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + SequentialTensorWriter writer(input_data, output_data); + StridedSlice(op_params, unextended_input_shape, unextended_output_shape, + &writer); +} + +template +inline void StridedSlice(const tflite::StridedSliceParams& op_params, + const RuntimeShape& unextended_input_shape, + const TfLiteTensor* input, + const RuntimeShape& unextended_output_shape, + TfLiteTensor* output) { + SequentialTensorWriter writer(input, output); + StridedSlice(op_params, unextended_input_shape, unextended_output_shape, + &writer); +} + } // namespace reference_ops } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h index b27f251d..b8b8b732 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h @@ -65,10 +65,6 @@ inline void SubNonBroadcast(const ArithmeticParams& params, // dimensionality if the runtime code does a single loop over one dimension // that handles broadcasting as the base case. The code generator would then // generate max(D1, D2) nested for loops. -// TODO(b/151345101): BroadcastSub is intentionally duplicated from -// reference_ops.h. Once an optimized version is implemented and NdArrayDesc -// is no longer referenced in this file, move NdArrayDesc from types.h to -// reference_ops.h. template inline void BroadcastSubSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, @@ -336,6 +332,50 @@ void BroadcastSubSlow(const ArithmeticParams& params, NDOpsHelper(output_desc, sub_func); } +template +inline void BroadcastSub16POTSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const int16_t* input1_data, + const RuntimeShape& input2_shape, + const int16_t* input2_data, + const RuntimeShape& output_shape, + int16_t* output_data) { + ruy::profiler::ScopeLabel label("BroadcastSub16POTSlow/int16_t"); + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDesc output_desc; + NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, + &desc2); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); + + // In Tensorflow, the dimensions are canonically named (batch_number, row, + // col, channel), with extents (batches, height, width, depth), with the + // trailing dimension changing most rapidly (channels has the smallest stride, + // typically 1 element). 
+ // + // In generated C code, we store arrays with the dimensions reversed. The + // first dimension has smallest stride. + // + // We name our variables by their Tensorflow convention, but generate C code + // nesting loops such that the innermost loop has the smallest stride for the + // best cache behavior. + auto sub_func = [&](int indexes[N]) { + const int32_t input1_val = input1_data[SubscriptToIndex(desc1, indexes)]; + const int32_t input2_val = input2_data[SubscriptToIndex(desc2, indexes)]; + const int32_t scaled_input1_val = + gemmlowp::RoundingDivideByPOT(input1_val, -params.input1_shift); + const int32_t scaled_input2_val = + gemmlowp::RoundingDivideByPOT(input2_val, -params.input2_shift); + const int32_t raw_output = scaled_input1_val - scaled_input2_val; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + output_data[SubscriptToIndex(output_desc, indexes)] = + static_cast(clamped_output); + }; + NDOpsHelper(output_desc, sub_func); +} + // Element-wise Sub that can often be used for inner loop of broadcast sub as // well as the non-broadcast sub. inline void SubElementwise(int size, const ArithmeticParams& params, diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose_conv.h new file mode 100644 index 00000000..6e9cb1f9 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose_conv.h @@ -0,0 +1,217 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ + +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +inline void TransposeConv( + const ConvParams& params, const RuntimeShape& input_shape, + const float* input_data, const RuntimeShape& filter_shape, + const float* filter_data, const RuntimeShape& bias_shape, + const float* bias_data, const RuntimeShape& output_shape, + float* output_data, const RuntimeShape& im2col_shape, float* im2col_data) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. 
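// --- Illustrative note, not part of the TFLite sources in this patch --------
// The float TransposeConv below scatters rather than gathers: each input pixel
// (in_y, in_x) contributes to the output window whose top-left corner is
// (in_y * stride - pad, in_x * stride - pad) and whose extent is the filter
// size. With symmetric padding this yields the usual transpose-conv size
// relation out = (in - 1) * stride + filter - 2 * pad; e.g. in = 4, stride = 2,
// filter = 3, pad = 1 gives out = 3 * 2 + 3 - 2 = 7, and the scattered columns
// -1..7 are clipped to the valid range 0..6 by the bounds check in the loop.
// ----------------------------------------------------------------------------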
+ + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + // Although transpose convolution simplifies to convolution with transposed + // weights for strides of 1, non-unitary striding complicates matters. To + // keep this reference implementation as clear as possible, we use a + // "scatter" access pattern, where we loop through all the input elements, + // computing their influence on the output, rather than looping through the + // output elements in the typical "gather" access pattern of a conv. We + // therefore must initialize the output array to zero. + const int num_elements = output_shape.FlatSize(); + for (int i = 0; i < num_elements; i++) { + output_data[i] = 0.0f; + } + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds + if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + float input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + float filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + input_value * filter_value; + } + } + } + } + } + } + } + } + if (bias_data) { + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)] += bias_data[out_channel]; + } + } + } + } + } +} + +inline void TransposeConv( + const ConvParams& params, const RuntimeShape& input_shape, + const uint8_t* input_data, const RuntimeShape& filter_shape, + const uint8_t* filter_data, const RuntimeShape& bias_shape, + const int32_t* bias_data, const RuntimeShape& output_shape, + uint8_t* output_data, const RuntimeShape& im2col_shape, + uint8_t* im2col_data, int32_t* scratch_buffer) { + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = params.padding_values.width; + const int pad_height = params.padding_values.height; + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); 
+ TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + (void)im2col_data; // only used in optimized code. + (void)im2col_shape; // only used in optimized code. + + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int32_t input_offset = params.input_offset; + const int32_t filter_offset = params.weights_offset; + const int32_t output_offset = params.output_offset; + const int32_t output_multiplier = params.output_multiplier; + const int output_shift = params.output_shift; + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + if (bias_data) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + const int num_elements = output_shape.FlatSize(); + // We need to initialize scratch_buffer to all 0s, as we apply the same + // 'scatter' based trick as in float version. + memset(scratch_buffer, 0, num_elements * sizeof(int32_t)); + + // Loop through input elements one at a time. + for (int batch = 0; batch < batches; ++batch) { + for (int in_y = 0; in_y < input_height; ++in_y) { + for (int in_x = 0; in_x < input_width; ++in_x) { + for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + // Loop through the output elements it will influence. + const int out_x_origin = (in_x * stride_width) - pad_width; + const int out_y_origin = (in_y * stride_height) - pad_height; + for (int filter_y = 0; filter_y < filter_height; ++filter_y) { + for (int filter_x = 0; filter_x < filter_width; ++filter_x) { + for (int out_channel = 0; out_channel < output_depth; + ++out_channel) { + // Compute output element location. + const int out_x = out_x_origin + filter_x; + const int out_y = out_y_origin + filter_y; + // We cannot accumulate out of bounds. 
+ if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && + (out_y < output_height)) { + uint8_t input_value = input_data[Offset( + input_shape, batch, in_y, in_x, in_channel)]; + uint8_t filter_value = + filter_data[Offset(filter_shape, out_channel, filter_y, + filter_x, in_channel)]; + scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)] += + (input_value + input_offset) * + (filter_value + filter_offset); + } + } + } + } + } + } + } + } + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) { + acc += bias_data[out_channel]; + } + int32_t scaled_acc = MultiplyByQuantizedMultiplier( + acc, output_multiplier, output_shift); + scaled_acc += output_offset; + scaled_acc = std::max(scaled_acc, output_activation_min); + scaled_acc = std::min(scaled_acc, output_activation_max); + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + static_cast(scaled_acc); + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/strided_slice_logic.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/strided_slice_logic.h index 3d91fbdb..bfe84050 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/strided_slice_logic.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/strided_slice_logic.h @@ -140,7 +140,7 @@ inline int StopForAxis(const tflite::StridedSliceParams& params, // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has // already been adjusted for negative indices. if (shrink_axis) { - stop = start_for_axis + 1; + return start_for_axis + 1; } // end_mask override diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/types.h b/code/components/tfmicro/tensorflow/lite/kernels/internal/types.h index 37403a88..99e7bb98 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/internal/types.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/internal/types.h @@ -43,6 +43,20 @@ struct PaddingValues { int16_t height_offset; }; +struct Padding3DValues { + int16_t width; + int16_t height; + int16_t depth; + // offset is used for calculating "remaining" padding, for example, `width` + // is 1 and `width_offset` is 1, so padding_left is 1 while padding_right is + // 1 + 1 = 2. + int16_t width_offset; + // Same as width_offset except it's over the height dimension. + int16_t height_offset; + // Same as width_offset except it's over the depth dimension. + int16_t depth_offset; +}; + // This enumeration allows for non-default formats for the weights array // of a fully-connected operator, allowing the use of special optimized // runtime paths. @@ -170,7 +184,11 @@ class RuntimeShape { // rolls out. 
RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) { if (size_ > kMaxSmallSize) { +#ifdef TF_LITE_STATIC_MEMORY + TFLITE_CHECK(false && "No shape resizing supported on this platform"); +#else dims_pointer_ = new int32_t[size_]; +#endif } std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_); } @@ -392,6 +410,20 @@ inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) { return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3; } +inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3, + int i4) { + TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5); + const int* dims_data = reinterpret_cast(shape.DimsDataUpTo5D()); + TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]); + TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]); + TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]); + TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]); + TFLITE_DCHECK(i4 >= 0 && i4 < dims_data[4]); + return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) * + dims_data[4] + + i4; +} + inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) { TFLITE_DCHECK(i0 >= 0 && i0 < dims.sizes[0]); TFLITE_DCHECK(i1 >= 0 && i1 < dims.sizes[1]); @@ -840,6 +872,19 @@ struct ConvParams { float float_activation_max; }; +struct Conv3DParams { + Padding3DValues padding_values; + int stride_width; + int stride_height; + int stride_depth; + int dilation_width; + int dilation_height; + int dilation_depth; + // float activation params. + float float_activation_min; + float float_activation_max; +}; + struct DepthToSpaceParams { int32_t block_size; }; @@ -907,6 +952,7 @@ struct FullyConnectedParams { struct GatherParams { int16_t axis; + int16_t batch_dims; }; struct L2NormalizationParams { @@ -1025,9 +1071,9 @@ struct ResizeNearestNeighborParams { struct SliceParams { int8_t begin_count; - int32_t begin[4]; + int32_t begin[5]; int8_t size_count; - int32_t size[4]; + int32_t size[5]; }; struct SoftmaxParams { diff --git a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc b/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc index f986655f..c8fbea6d 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc +++ b/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc @@ -21,12 +21,19 @@ limitations under the License. #include #include #include +#ifndef TF_LITE_STATIC_MEMORY +#include +#endif // TF_LITE_STATIC_MEMORY #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" +#if defined(__APPLE__) +#include "TargetConditionals.h" +#endif + namespace tflite { namespace { @@ -283,8 +290,7 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, double* multiplier) { const double input_product_scale = static_cast(input->params.scale) * static_cast(filter->params.scale); - // TODO(ahentz): The following conditions must be guaranteed by the training - // pipeline. + // The following conditions must be guaranteed by the training pipeline. 
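// --- Illustrative note, not part of the TFLite sources in this patch --------
// GetQuantizedConvolutionMultipler returns the effective real multiplier
// input_scale * filter_scale / output_scale, which callers then split into a
// quantized multiplier and shift via QuantizeMultiplier. For example,
// input_scale = 0.5, filter_scale = 0.25, output_scale = 1.0 gives 0.125 =
// 0.5 * 2^-2, i.e. roughly multiplier = 1 << 30 (0.5 in Q31) with shift = -2.
// ----------------------------------------------------------------------------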
if (bias) { const double bias_scale = static_cast(bias->params.scale); // Here we're making sure the input_product_scale & bias_scale are about the @@ -383,9 +389,25 @@ bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { return TfLiteIntArrayEqual(input1->dims, input2->dims); } -// TODO(petewarden): Having macros around this is ugly, look at other strategies -// before replicating this approach elsewhere. #ifndef TF_LITE_STATIC_MEMORY + +// TODO(b/172067338): Having this function be part of TF_LITE_STATIC_MEMORY +// build results in a 6KB size increase, even though the function is unsused for +// that build. What appears to be happening is that while the linker drops the +// unsused function, the string library that gets pulled in is not dropped, +// resulting in the increased binary size. +std::string GetShapeDebugString(const TfLiteIntArray* shape) { + std::string str; + for (int d = 0; d < shape->size; ++d) { + if (str.empty()) + str = "[" + std::to_string(shape->data[d]); + else + str += ", " + std::to_string(shape->data[d]); + } + str += "]"; + return str; +} + TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, @@ -402,7 +424,13 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, for (int i = 0; i < out_dims; ++i) { int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - TF_LITE_ENSURE(context, d1 == d2 || d1 == 1 || d2 == 1); + if (!(d1 == d2 || d1 == 1 || d2 == 1)) { + context->ReportError(context, + "Given shapes, %s and %s, are not broadcastable.", + GetShapeDebugString(input1->dims).c_str(), + GetShapeDebugString(input2->dims).c_str()); + return kTfLiteError; + } shape->data[out_dims - i - 1] = std::max(d1, d2); } *output_shape = shape.release(); @@ -425,9 +453,15 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); int d3 = i >= dims3 ? 
1 : SizeOfDimension(input3, dims3 - i - 1); int max_value = std::max(std::max(d1, d2), d3); - TF_LITE_ENSURE(context, d1 == 1 || d1 == max_value); - TF_LITE_ENSURE(context, d2 == 1 || d2 == max_value); - TF_LITE_ENSURE(context, d3 == 1 || d3 == max_value); + if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) || + !(d3 == 1 || d3 == max_value)) { + context->ReportError( + context, "Given shapes, %s, %s and %s, are not broadcastable.", + GetShapeDebugString(input1->dims).c_str(), + GetShapeDebugString(input2->dims).c_str(), + GetShapeDebugString(input3->dims).c_str()); + return kTfLiteError; + } shape->data[out_dims - i - 1] = max_value; } *output_shape = shape.release(); @@ -458,9 +492,15 @@ int TfLiteTypeGetSize(TfLiteType type) { case kTfLiteInt32: TF_LITE_ASSERT_EQ(sizeof(int32_t), 4); return 4; + case kTfLiteUInt32: + TF_LITE_ASSERT_EQ(sizeof(uint32_t), 4); + return 4; case kTfLiteInt64: TF_LITE_ASSERT_EQ(sizeof(int64_t), 8); return 8; + case kTfLiteUInt64: + TF_LITE_ASSERT_EQ(sizeof(uint64_t), 8); + return 8; case kTfLiteFloat64: TF_LITE_ASSERT_EQ(sizeof(double), 8); return 8; @@ -475,4 +515,15 @@ int TfLiteTypeGetSize(TfLiteType type) { } } +bool IsMobilePlatform() { +#if defined(ANDROID) || defined(__ANDROID__) + return true; +#elif defined(__APPLE__) +#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE + return true; +#endif +#endif + return false; +} + } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h b/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h index 7a1aa165..94418425 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h @@ -288,6 +288,9 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, // Return the size of given type in bytes. Return 0 in in case of string. int TfLiteTypeGetSize(TfLiteType type); +// Whether the current platform is mobile (Android or iOS). +bool IsMobilePlatform(); + } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_ diff --git a/code/components/tfmicro/tensorflow/lite/kernels/op_macros.h b/code/components/tfmicro/tensorflow/lite/kernels/op_macros.h index 5786756f..293dc76e 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/op_macros.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/op_macros.h @@ -57,7 +57,7 @@ inline void InfiniteLoop() { #endif // TF_LITE_MCU_DEBUG_LOG -#ifdef NDEBUG +#if defined(NDEBUG) || defined(ARDUINO) #define TFLITE_ASSERT_FALSE (static_cast(0)) #else #define TFLITE_ASSERT_FALSE TFLITE_ABORT diff --git a/code/components/tfmicro/tensorflow/lite/kernels/padding.h b/code/components/tfmicro/tensorflow/lite/kernels/padding.h index 1116b1da..d41e4715 100644 --- a/code/components/tfmicro/tensorflow/lite/kernels/padding.h +++ b/code/components/tfmicro/tensorflow/lite/kernels/padding.h @@ -16,6 +16,7 @@ limitations under the License. 
#define TENSORFLOW_LITE_KERNELS_PADDING_H_ #include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/kernels/internal/types.h" namespace tflite { @@ -75,6 +76,36 @@ inline TfLitePaddingValues ComputePaddingHeightWidth( padding_values.width_offset = offset; return padding_values; } + +inline Padding3DValues ComputePadding3DValues( + int stride_height, int stride_width, int stride_depth, + int dilation_rate_height, int dilation_rate_width, int dilation_rate_depth, + int in_height, int in_width, int in_depth, int filter_height, + int filter_width, int filter_depth, TfLitePadding padding, int* out_height, + int* out_width, int* out_depth) { + *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, + dilation_rate_width); + *out_height = ComputeOutSize(padding, in_height, filter_height, stride_height, + dilation_rate_height); + *out_depth = ComputeOutSize(padding, in_depth, filter_depth, stride_depth, + dilation_rate_depth); + + Padding3DValues padding_values; + int offset = 0; + padding_values.depth = + ComputePaddingWithOffset(stride_depth, dilation_rate_depth, in_depth, + filter_depth, *out_depth, &offset); + padding_values.depth_offset = offset; + padding_values.height = + ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height, + filter_height, *out_height, &offset); + padding_values.height_offset = offset; + padding_values.width = + ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width, + filter_width, *out_width, &offset); + padding_values.width_offset = offset; + return padding_values; +} } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_PADDING_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc b/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc index 0a2a0c0f..90824e97 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc @@ -1,8 +1,11 @@ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -15,35 +18,35 @@ limitations under the License. #include "tensorflow/lite/micro/kernels/micro_ops.h" namespace tflite { -namespace ops { -namespace micro { -namespace custom { -TfLiteRegistration* Register_ETHOSU(); -const char* GetString_ETHOSU(); -} // namespace custom -} // namespace micro -} // namespace ops AllOpsResolver::AllOpsResolver() { // Please keep this list of Builtin Operators in alphabetical order. 
AddAbs(); AddAdd(); + AddAddN(); AddArgMax(); AddArgMin(); AddAveragePool2D(); + AddBatchToSpaceNd(); AddCeil(); AddConcatenation(); AddConv2D(); AddCos(); AddDepthwiseConv2D(); AddDequantize(); + AddDetectionPostprocess(); + AddDiv(); + AddElu(); AddEqual(); + AddEthosU(); AddFloor(); AddFullyConnected(); AddGreater(); AddGreaterEqual(); AddHardSwish(); AddL2Normalization(); + AddL2Pool2D(); + AddLeakyRelu(); AddLess(); AddLessEqual(); AddLog(); @@ -51,8 +54,8 @@ AllOpsResolver::AllOpsResolver() { AddLogicalNot(); AddLogicalOr(); AddLogistic(); - AddMaximum(); AddMaxPool2D(); + AddMaximum(); AddMean(); AddMinimum(); AddMul(); @@ -73,22 +76,18 @@ AllOpsResolver::AllOpsResolver() { AddShape(); AddSin(); AddSoftmax(); + AddSpaceToBatchNd(); AddSplit(); AddSplitV(); AddSqrt(); AddSquare(); + AddSqueeze(); AddStridedSlice(); AddSub(); AddSvdf(); AddTanh(); + AddTransposeConv(); AddUnpack(); - - // TODO(b/159644355): Figure out if custom Ops belong in AllOpsResolver. - TfLiteRegistration* registration = - tflite::ops::micro::custom::Register_ETHOSU(); - if (registration) { - AddCustom(tflite::ops::micro::custom::GetString_ETHOSU(), registration); - } } } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.h b/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.h index e8105b96..391b4f08 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.h +++ b/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.h @@ -1,8 +1,11 @@ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/code/components/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc b/code/components/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc deleted file mode 100644 index 834f44ca..00000000 --- a/code/components/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc +++ /dev/null @@ -1,2898 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h" - -// Keep model aligned to 8 bytes to guarantee aligned 64-bit accesses. 
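// --- Illustrative note, not part of the TFLite sources in this patch --------
// Model byte arrays like the one removed below are kept 8-byte aligned so the
// flatbuffer can be read with aligned 64-bit accesses. A typical TFLite Micro
// consumer (g_model, tensor_arena and the arena size are placeholder names)
// looks roughly like:
//   alignas(8) const unsigned char g_model[] = { /* flatbuffer bytes */ };
//   const tflite::Model* model = tflite::GetModel(g_model);
//   tflite::AllOpsResolver resolver;
//   static tflite::MicroErrorReporter error_reporter;
//   static uint8_t tensor_arena[100 * 1024];
//   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
//                                        sizeof(tensor_arena), &error_reporter);
// ----------------------------------------------------------------------------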
-alignas(8) const unsigned char g_keyword_scrambled_model_data[] = {
-    [... several thousand lines of hex model bytes elided; the remainder of the deleted g_keyword_scrambled_model_data array is omitted here ...]
0x1d, 0x6f, 0xba, 0x89, 0xb9, 0x9e, 0x7a, 0x58, 0x1d, 0x43, 0xad, - 0x85, 0x8b, 0x6b, 0xcc, 0x25, 0xb8, 0xe4, 0xdd, 0xa1, 0x35, 0xd9, 0xef, - 0xc4, 0xb1, 0xf6, 0x99, 0x27, 0x17, 0xb7, 0xbe, 0xd1, 0x4f, 0xa1, 0x81, - 0x4e, 0xb6, 0x19, 0xcd, 0xa0, 0x92, 0xeb, 0x56, 0x41, 0x4f, 0x37, 0xca, - 0x3b, 0x43, 0x85, 0x86, 0xdf, 0x5d, 0x5a, 0x8c, 0xd4, 0x5b, 0xc4, 0x28, - 0xdb, 0x16, 0xea, 0x3a, 0x2e, 0x9e, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0xea, 0x59, 0x40, 0xc4, 0x40, 0x8b, 0x6a, 0x8a, - 0xb8, 0x7f, 0x1e, 0x0b, 0xfe, 0xab, 0xa4, 0xac, 0x42, 0x91, 0xc5, 0xfa, - 0x2c, 0x7e, 0xb4, 0xf9, 0x5c, 0xd5, 0x4c, 0x6a, 0x74, 0x82, 0x90, 0x81, - 0x96, 0xb0, 0xf4, 0xd4, 0xba, 0xc9, 0xa3, 0x2e, 0x26, 0x0a, 0xc9, 0x55, - 0x65, 0xac, 0xde, 0x83, 0x37, 0xec, 0x0e, 0xf6, 0xdc, 0x8c, 0x34, 0xe6, - 0x57, 0xde, 0x32, 0x0a, 0x02, 0x62, 0x4f, 0x6a, 0x92, 0xa5, 0xb4, 0x40, - 0xde, 0x57, 0xf4, 0xd1, 0xa3, 0x1c, 0xd3, 0xf7, 0x4a, 0x15, 0xcc, 0x27, - 0x26, 0x00, 0xba, 0xf3, 0xfa, 0x4e, 0xc6, 0xe9, 0xc3, 0x05, 0x3d, 0x3a, - 0x89, 0x96, 0x7d, 0x41, 0xac, 0xca, 0x28, 0x7f, 0x69, 0x02, 0x40, 0x03, - 0x93, 0x86, 0x85, 0x85, 0x73, 0x00, 0x09, 0x5a, 0xcf, 0x5f, 0x1d, 0xaa, - 0x46, 0x41, 0x9d, 0x08, 0xbf, 0xea, 0x45, 0x9b, 0x93, 0xda, 0x9e, 0x81, - 0xba, 0x9e, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, - 0x6a, 0x1f, 0x9b, 0x03, 0xdd, 0xe4, 0x16, 0x07, 0x7f, 0x5b, 0xb0, 0xee, - 0xac, 0x55, 0xc4, 0x50, 0xe6, 0x2b, 0x17, 0xed, 0x7f, 0x50, 0x4d, 0x71, - 0x73, 0xae, 0xe0, 0x4d, 0xce, 0x08, 0xd9, 0x8b, 0x83, 0x2c, 0x01, 0x48, - 0x02, 0xd3, 0xbb, 0xca, 0x86, 0xd7, 0xca, 0x5f, 0xc7, 0xce, 0x59, 0xdf, - 0xc1, 0xcc, 0xf7, 0x7b, 0x54, 0xf8, 0x0d, 0x4f, 0x81, 0x9e, 0x50, 0x6a, - 0x65, 0x66, 0x4a, 0xec, 0x7a, 0x1b, 0x92, 0xb2, 0x39, 0x8f, 0x5d, 0x41, - 0x33, 0xcf, 0xe6, 0x1b, 0x34, 0x5d, 0xe1, 0xf6, 0xef, 0xcb, 0xa0, 0x55, - 0x7e, 0x1f, 0x45, 0x38, 0xb9, 0x56, 0x15, 0x3b, 0x70, 0xab, 0xc8, 0x2f, - 0x1c, 0xb9, 0x7d, 0x37, 0xe1, 0xb4, 0x03, 0x44, 0x5a, 0xf6, 0x57, 0x97, - 0x03, 0x54, 0x4c, 0x22, 0x88, 0xc3, 0x82, 0xfd, 0x91, 0xc1, 0xf1, 0x63, - 0xb4, 0x50, 0x46, 0x11, 0x64, 0x07, 0xfd, 0x85, 0xe5, 0x78, 0x57, 0xdd, - 0x19, 0x2a, 0x6b, 0x64, 0x3e, 0xec, 0xb8, 0xf3, 0xb5, 0x95, 0x29, 0x72, - 0xf1, 0x9d, 0xdd, 0xb9, 0xad, 0xd0, 0x78, 0x26, 0x86, 0x10, 0x10, 0x19, - 0xe4, 0x79, 0xae, 0xdc, 0x56, 0xb7, 0x54, 0x4f, 0x94, 0xc6, 0x26, 0x9a, - 0x93, 0xa8, 0x2e, 0x1b, 0x1c, 0xda, 0x87, 0x3a, 0xa2, 0x44, 0xb9, 0x0b, - 0x0f, 0xab, 0x70, 0x3b, 0xb7, 0x6c, 0xbf, 0x58, 0x67, 0x32, 0x7d, 0xa3, - 0x2a, 0xcb, 0x4e, 0x02, 0x92, 0xa1, 0x26, 0x0e, 0x20, 0x5e, 0xb3, 0xec, - 0xc4, 0x04, 0x5b, 0x7f, 0xe5, 0xbd, 0x30, 0xeb, 0xc8, 0xdd, 0xf1, 0x72, - 0x5a, 0x7e, 0xcb, 0x93, 0x22, 0xa0, 0x01, 0x9f, 0xbb, 0x24, 0x9f, 0x50, - 0x01, 0x1f, 0x24, 0x02, 0x85, 0x6d, 0xe6, 0x4d, 0x55, 0xc4, 0x07, 0xe9, - 0x87, 0x38, 0xbf, 0x1a, 0x3b, 0x05, 0x82, 0xc4, 0x73, 0x4b, 0x87, 0x3c, - 0xb4, 0x0a, 0x48, 0x8c, 0x06, 0x67, 0xe7, 0xbf, 0xcc, 0xe7, 0xe5, 0xc3, - 0xb2, 0x81, 0x60, 0xe2, 0xd1, 0xb1, 0x8f, 0x98, 0xbd, 0x7d, 0xbd, 0x4e, - 0x9a, 0xca, 0xbe, 0xcb, 0x81, 0x47, 0x25, 0xaa, 0xfa, 0x91, 0xcf, 0x78, - 0xce, 0xcb, 0x1a, 0x11, 0x79, 0xcf, 0x97, 0xa3, 0x95, 0x95, 0x6f, 0xd7, - 0xae, 0x80, 0xc9, 0xd5, 0x95, 0xb7, 0xcf, 0xe2, 0x9d, 0x98, 0x65, 0x80, - 0xfd, 0x2e, 0xee, 0x46, 0x5e, 0x46, 0x8c, 0xde, 0x52, 0xb4, 0xdc, 0xce, - 0xa8, 0xab, 0x4e, 0x0c, 0x12, 0x9f, 0x89, 0x9c, 0x84, 0x80, 0xfe, 0x08, - 0x64, 0x12, 0x12, 0x95, 0x62, 0xea, 0x65, 0xcc, 0x34, 0x80, 0xcf, 0x92, - 0x5f, 0xc2, 0xae, 0x76, 0xe7, 0x2f, 0xbb, 0xa8, 0xdb, 0x6a, 0x66, 0x60, - 0xaf, 
0x88, 0xba, 0x65, 0x32, 0xcf, 0xf7, 0x6e, 0xd8, 0xd0, 0x69, 0xb0, - 0x12, 0x23, 0xd6, 0xc2, 0x32, 0xe5, 0x8e, 0x51, 0xc5, 0x61, 0x28, 0x45, - 0xf7, 0xf9, 0xea, 0x73, 0xce, 0x04, 0x2d, 0x56, 0x43, 0x10, 0x8b, 0x4f, - 0x6b, 0xfa, 0x32, 0xa8, 0x92, 0x8f, 0xd9, 0xb4, 0xfd, 0xa4, 0x74, 0xa8, - 0xea, 0xca, 0xd3, 0x84, 0xbb, 0x5a, 0x34, 0x57, 0xf9, 0xda, 0x25, 0x40, - 0x1f, 0x5e, 0xc2, 0x66, 0x43, 0x05, 0xdd, 0x13, 0x88, 0x91, 0x60, 0xa1, - 0x75, 0xd3, 0xc4, 0x27, 0xff, 0xda, 0x24, 0x3d, 0xd9, 0xd7, 0x47, 0x46, - 0x30, 0xd0, 0x76, 0xc4, 0x9e, 0x97, 0xe3, 0x43, 0xd7, 0x45, 0xaf, 0x49, - 0x36, 0xf2, 0x18, 0xdd, 0x3f, 0x86, 0x9a, 0xec, 0x9a, 0x70, 0xeb, 0x5a, - 0xe2, 0xa0, 0x4b, 0x45, 0x21, 0xb3, 0x32, 0x3d, 0x0c, 0x8c, 0x03, 0x13, - 0xae, 0x46, 0xb5, 0x1a, 0x0a, 0x03, 0x36, 0xfe, 0xfe, 0xfa, 0xc9, 0x4d, - 0x46, 0xf8, 0xfe, 0x6f, 0x99, 0x8c, 0xe4, 0x77, 0x0c, 0x27, 0x59, 0xf7, - 0xc3, 0xfc, 0x32, 0xb3, 0xa5, 0xae, 0xdc, 0x49, 0xac, 0x31, 0x27, 0xa6, - 0x14, 0x92, 0xfb, 0xe3, 0x69, 0x35, 0x8d, 0xa0, 0x50, 0x55, 0x09, 0x90, - 0xdf, 0x67, 0x08, 0x4c, 0x0e, 0xaf, 0x71, 0xc2, 0xe8, 0xb8, 0xdc, 0x45, - 0xe3, 0x6d, 0x58, 0x3f, 0x19, 0x8d, 0xcd, 0xeb, 0xe3, 0x02, 0x49, 0xd8, - 0xc8, 0x8b, 0x29, 0xb3, 0xef, 0x2b, 0xf0, 0x39, 0x5c, 0x11, 0xaa, 0x52, - 0x44, 0x0d, 0x1a, 0x3a, 0x7a, 0x62, 0xda, 0x6d, 0xe3, 0xdd, 0x03, 0x30, - 0x6d, 0x3e, 0x18, 0x30, 0x1d, 0xc0, 0xd0, 0x05, 0x67, 0x98, 0xf5, 0x2a, - 0xc7, 0xa1, 0x58, 0xd7, 0xf8, 0x6f, 0x7d, 0x07, 0x59, 0x27, 0x95, 0xb9, - 0x8d, 0x4d, 0xd7, 0xc8, 0x5e, 0x8b, 0x89, 0x14, 0xb7, 0x1b, 0x35, 0xaa, - 0x72, 0x02, 0x39, 0x3c, 0x41, 0x7c, 0x91, 0x93, 0x81, 0xe1, 0xad, 0xbe, - 0x77, 0x28, 0x80, 0xa2, 0x9c, 0xa8, 0x00, 0x18, 0xa5, 0x70, 0xec, 0xec, - 0x96, 0x95, 0x37, 0xa3, 0xee, 0x15, 0xa0, 0x69, 0x0e, 0x05, 0xb5, 0xb4, - 0xb6, 0xa7, 0x8b, 0xb9, 0x41, 0x88, 0x4f, 0x56, 0x39, 0xa7, 0xbe, 0x24, - 0xce, 0x4c, 0xe0, 0x9c, 0x24, 0x5a, 0xa1, 0xab, 0xcd, 0x82, 0xf1, 0x16, - 0x3f, 0xc0, 0xaf, 0xe1, 0x42, 0xe0, 0x7d, 0x1b, 0xd9, 0x8f, 0xb8, 0x04, - 0xa1, 0x88, 0xd9, 0xc3, 0xaf, 0x4f, 0xda, 0xfd, 0x0b, 0x5c, 0xc3, 0x04, - 0xf3, 0xdb, 0xe6, 0x76, 0x6e, 0xe9, 0xdc, 0xea, 0x6f, 0xa2, 0xa5, 0x75, - 0x2c, 0xc7, 0x91, 0x7d, 0x4b, 0xd5, 0x68, 0x55, 0xbb, 0x2d, 0x14, 0xdb, - 0x06, 0x76, 0xf7, 0xcc, 0x0a, 0x88, 0x6c, 0x2b, 0xa1, 0x57, 0xd6, 0x15, - 0x9c, 0x46, 0xcf, 0x5b, 0x6f, 0x9e, 0x7e, 0xc5, 0x39, 0xda, 0x97, 0x26, - 0x5e, 0xf5, 0x25, 0x06, 0xed, 0x8e, 0x9b, 0x1d, 0x1b, 0x91, 0x07, 0x89, - 0x08, 0xce, 0xd7, 0x38, 0x43, 0x64, 0x8e, 0xf5, 0x3a, 0x52, 0x4a, 0xfb, - 0x3e, 0xff, 0x2c, 0xb3, 0x78, 0x40, 0xb5, 0xdd, 0xb2, 0x8a, 0xd3, 0x6a, - 0xc5, 0xb0, 0xa3, 0x4a, 0xb8, 0xe7, 0x27, 0xa0, 0x5a, 0x8f, 0x0f, 0xda, - 0x53, 0x49, 0xc9, 0x77, 0x2a, 0xef, 0x78, 0xc6, 0xec, 0xaf, 0x10, 0xe5, - 0x71, 0xc5, 0x7a, 0x85, 0xdf, 0xb2, 0x85, 0x02, 0xe3, 0x55, 0x7a, 0x91, - 0x3a, 0x68, 0xb2, 0x9d, 0x3d, 0xd9, 0x01, 0xc5, 0x5f, 0x3c, 0xa8, 0x1d, - 0x99, 0xc6, 0xe7, 0xad, 0x09, 0xd1, 0x39, 0x3a, 0x92, 0xc5, 0x77, 0x9c, - 0xdf, 0x99, 0x56, 0x9f, 0xfe, 0xf8, 0xfd, 0xc8, 0x4f, 0x19, 0xa3, 0xa0, - 0xdf, 0xff, 0x17, 0xac, 0xa9, 0x03, 0x32, 0x85, 0x4c, 0x29, 0xca, 0x89, - 0x58, 0xdc, 0x88, 0xdd, 0xeb, 0x79, 0x68, 0x5e, 0x0f, 0x37, 0x1a, 0xf7, - 0x05, 0xfd, 0x39, 0x91, 0x25, 0x61, 0xf3, 0x04, 0xda, 0x97, 0xfc, 0x7b, - 0xcc, 0x40, 0x63, 0xfd, 0x5b, 0x3b, 0x27, 0x8e, 0x92, 0x6d, 0x98, 0x0f, - 0xcc, 0x9c, 0x9b, 0xda, 0xb2, 0xc6, 0xca, 0x56, 0xff, 0x7e, 0xcc, 0xa2, - 0xc0, 0x45, 0x3e, 0xf6, 0xdf, 0xa7, 0xe8, 0x2a, 0xef, 0x0c, 0xde, 0xec, - 0xa4, 0x1d, 0x2c, 0x3e, 0x03, 0xfd, 0xa4, 0x44, 0x60, 0x4a, 0xf5, 0x83, - 0x8f, 
0x09, 0x2d, 0xe8, 0xd5, 0x46, 0xf6, 0x1c, 0x2d, 0x39, 0x28, 0x0c, - 0xdf, 0xa1, 0x2b, 0x05, 0x6e, 0x3c, 0x36, 0xdd, 0x91, 0x81, 0x52, 0xf1, - 0x56, 0xdc, 0xbb, 0x79, 0x62, 0xd8, 0x2e, 0x27, 0x5d, 0x9f, 0x3c, 0xce, - 0x81, 0x5c, 0x70, 0xe5, 0x4d, 0x33, 0x06, 0xd5, 0x14, 0x04, 0xb7, 0xbc, - 0x7b, 0x7a, 0xb4, 0xf7, 0x4a, 0x48, 0x8f, 0x97, 0x85, 0x96, 0x69, 0xc9, - 0x40, 0x52, 0xb1, 0x1c, 0x28, 0x82, 0xb3, 0x63, 0xee, 0x94, 0x2f, 0xcb, - 0x40, 0xad, 0xd7, 0x78, 0xb1, 0xc4, 0x21, 0x05, 0x36, 0xd9, 0x46, 0xf0, - 0x83, 0xcd, 0xee, 0x52, 0x7a, 0xa6, 0xa4, 0x40, 0xb0, 0x2f, 0xf0, 0x1c, - 0xfa, 0x42, 0x98, 0x54, 0x5b, 0xfe, 0x5e, 0xd6, 0x84, 0x73, 0xca, 0x39, - 0xbe, 0x87, 0xf2, 0x92, 0xee, 0x3d, 0x21, 0xcc, 0x69, 0x81, 0xe5, 0xe8, - 0x8a, 0xc3, 0x23, 0x64, 0x98, 0xd5, 0x1d, 0xcd, 0x5c, 0x6c, 0x37, 0xc8, - 0x8b, 0x08, 0x22, 0x12, 0x9f, 0x85, 0xc9, 0xed, 0xb4, 0xa6, 0x07, 0xe1, - 0x62, 0x79, 0x35, 0x5d, 0x26, 0x11, 0x4a, 0x6b, 0x33, 0x37, 0x91, 0x78, - 0xe8, 0xe2, 0xba, 0x8b, 0x8a, 0xb7, 0xbb, 0x0f, 0xd2, 0xb3, 0xa2, 0x02, - 0x0c, 0x57, 0x35, 0x99, 0x88, 0x6b, 0x9b, 0x64, 0x79, 0x1f, 0x4a, 0x48, - 0xd4, 0x3b, 0x5c, 0xeb, 0xb4, 0x83, 0xc3, 0xad, 0x9c, 0x6a, 0xb0, 0xcf, - 0x7f, 0x70, 0xe8, 0x22, 0x46, 0x25, 0xfe, 0x7e, 0x02, 0x44, 0x83, 0x02, - 0xb3, 0x08, 0x2e, 0x34, 0x08, 0x4b, 0xff, 0xa2, 0xc1, 0x60, 0xbb, 0xd8, - 0x89, 0x16, 0xf8, 0xaa, 0xab, 0xea, 0xf7, 0xa0, 0x10, 0x9a, 0xc9, 0xe9, - 0xa4, 0x81, 0xa7, 0x87, 0x32, 0x5b, 0xc1, 0xd0, 0xd9, 0x70, 0x6f, 0xb6, - 0x7c, 0x65, 0xd5, 0x0e, 0x65, 0x93, 0xfe, 0x6d, 0x66, 0xaa, 0xab, 0xd0, - 0x03, 0x07, 0xf2, 0xbe, 0x39, 0xd6, 0xc8, 0xac, 0xf2, 0x06, 0x58, 0x58, - 0x46, 0xc0, 0x1a, 0xbd, 0xa4, 0x96, 0x38, 0x31, 0x32, 0x89, 0x04, 0xdf, - 0xcd, 0x3c, 0x2e, 0x98, 0xb8, 0x39, 0xba, 0xe2, 0xca, 0x6b, 0xd0, 0x53, - 0xce, 0x4a, 0xc8, 0x95, 0x81, 0x84, 0x17, 0xce, 0x7f, 0x1d, 0xc1, 0x5a, - 0xc4, 0xc2, 0x73, 0x30, 0x6d, 0x0b, 0x8c, 0xf8, 0x66, 0x38, 0x4e, 0xa3, - 0x14, 0x84, 0x15, 0x36, 0x9e, 0x0d, 0x56, 0x6b, 0xa6, 0x77, 0x65, 0xa4, - 0x2c, 0x77, 0x00, 0x8b, 0x43, 0x57, 0xc6, 0x25, 0xc5, 0xd0, 0x17, 0x79, - 0x6b, 0x5d, 0xbc, 0xcd, 0xc8, 0x25, 0x8f, 0x20, 0x09, 0xcc, 0xbd, 0x80, - 0x10, 0xdf, 0x35, 0xf6, 0x9c, 0x04, 0x80, 0x23, 0xdc, 0x97, 0xe0, 0xba, - 0x29, 0x48, 0x2e, 0x95, 0x0f, 0xb1, 0x9b, 0xc7, 0xe6, 0x0b, 0x89, 0x16, - 0xe2, 0x81, 0x3b, 0x32, 0x69, 0xc4, 0xde, 0xc6, 0x12, 0x09, 0x47, 0xff, - 0x50, 0xe4, 0x45, 0xb7, 0x35, 0xd2, 0x61, 0x9b, 0x52, 0x6e, 0xbe, 0xaf, - 0xd2, 0xeb, 0x0c, 0x50, 0xf1, 0x57, 0x9f, 0x59, 0xe1, 0xc1, 0x4f, 0x8c, - 0x79, 0x07, 0x05, 0xce, 0x8d, 0x64, 0xb2, 0xf0, 0xd3, 0x4f, 0xe1, 0x7b, - 0xfa, 0x30, 0x0a, 0xc2, 0x5d, 0x0c, 0x47, 0x6c, 0x17, 0x77, 0x1f, 0xe5, - 0xd8, 0x14, 0xfd, 0xc1, 0x01, 0x70, 0x51, 0x60, 0xb2, 0x20, 0xfd, 0x86, - 0xbc, 0x19, 0x5e, 0x01, 0xa6, 0x19, 0x3a, 0x21, 0xa5, 0x0a, 0x1c, 0xd9, - 0xa9, 0x78, 0xbb, 0xc9, 0x01, 0x65, 0xe4, 0xb3, 0x48, 0xb8, 0xe1, 0xe7, - 0xb5, 0xf4, 0x4e, 0xa9, 0xb6, 0xe2, 0x5b, 0xeb, 0xf5, 0x76, 0x06, 0x1a, - 0xd9, 0x08, 0x40, 0xff, 0x72, 0xb2, 0xe3, 0x01, 0x50, 0xb1, 0xad, 0xb3, - 0xa3, 0xf6, 0xef, 0x72, 0x05, 0x0c, 0xf4, 0xce, 0x24, 0x2c, 0x63, 0x89, - 0x63, 0x9e, 0x21, 0xb8, 0xb0, 0xbe, 0xc7, 0x45, 0xae, 0x47, 0x2b, 0x9e, - 0x61, 0x81, 0x4c, 0x76, 0x96, 0x7b, 0x18, 0x37, 0x74, 0xcb, 0x00, 0xef, - 0x38, 0x72, 0x24, 0x0a, 0x63, 0xc1, 0x64, 0xd6, 0x41, 0xc8, 0x6a, 0xf1, - 0xe7, 0x11, 0x20, 0x4b, 0xc2, 0x95, 0x70, 0xb8, 0xf8, 0x8f, 0xd9, 0xae, - 0x8c, 0x12, 0xd8, 0x6f, 0x63, 0x30, 0xca, 0x56, 0x46, 0x11, 0xda, 0x49, - 0x1f, 0x84, 0x3d, 0xae, 0xab, 0x78, 0x29, 0x02, 0x6c, 0x43, 0xa3, 0xef, - 0x9d, 
0x97, 0x59, 0x15, 0x53, 0xcd, 0xc7, 0x47, 0x65, 0x30, 0xc7, 0xae, - 0x31, 0x4a, 0x41, 0xb4, 0x66, 0x9c, 0xbb, 0x51, 0x0b, 0xbd, 0xe2, 0x7d, - 0x41, 0x2c, 0xd0, 0x75, 0x57, 0x93, 0xce, 0x2e, 0xeb, 0x31, 0x7f, 0x56, - 0xb2, 0xa4, 0x2b, 0x9f, 0xcc, 0xef, 0x6f, 0xf0, 0x77, 0x19, 0xad, 0x4d, - 0x2e, 0x37, 0x00, 0x75, 0x53, 0xae, 0x22, 0x44, 0x69, 0x1c, 0x8a, 0x90, - 0xf2, 0xcd, 0x0f, 0x6b, 0x37, 0xdb, 0xfd, 0x71, 0x64, 0x80, 0xd8, 0x57, - 0x1b, 0x8f, 0xff, 0x14, 0xd4, 0x5f, 0xe1, 0xd1, 0x0f, 0x06, 0x13, 0x61, - 0x29, 0xa9, 0x80, 0x9d, 0xc7, 0x8a, 0xa0, 0xb5, 0xaa, 0xfc, 0xe0, 0xb4, - 0xb4, 0xf0, 0x31, 0xf0, 0xec, 0x78, 0x03, 0x28, 0xb9, 0xf7, 0xd9, 0xa7, - 0xc8, 0xad, 0x2e, 0x16, 0xb8, 0x18, 0x82, 0x43, 0x66, 0x8b, 0xae, 0xb2, - 0x45, 0x2b, 0x0c, 0x9d, 0x69, 0xbd, 0x1b, 0xc5, 0x20, 0xc6, 0x41, 0xe7, - 0x4f, 0x4b, 0x7b, 0x46, 0x3d, 0x7a, 0x6d, 0x9f, 0x13, 0x2e, 0x0f, 0xf3, - 0x85, 0x3e, 0x5b, 0x12, 0xe5, 0xbf, 0x1b, 0x20, 0xc3, 0x5f, 0x6b, 0xf7, - 0xf7, 0xa3, 0xd7, 0x33, 0xd2, 0xcb, 0x18, 0xa5, 0xa4, 0xa2, 0xd3, 0x59, - 0x91, 0x9a, 0x04, 0xfa, 0x9d, 0xa5, 0x55, 0xad, 0x09, 0x5a, 0x1e, 0x0b, - 0x10, 0xd0, 0x46, 0x18, 0xe4, 0x09, 0xe8, 0x1b, 0x44, 0xd3, 0x78, 0x45, - 0xc0, 0xdf, 0xa2, 0xef, 0xfc, 0x59, 0x8a, 0x1b, 0x22, 0x60, 0xc9, 0x58, - 0x7d, 0x65, 0x45, 0xa9, 0xac, 0xd5, 0xd4, 0xc4, 0x44, 0xd3, 0x08, 0x44, - 0x40, 0x4d, 0x3d, 0x7e, 0x39, 0x81, 0x72, 0x15, 0x49, 0xd7, 0x2c, 0xda, - 0x33, 0xaf, 0xc5, 0xb5, 0x8a, 0x3c, 0xbf, 0x81, 0x88, 0x4f, 0x12, 0xe4, - 0xe8, 0xe6, 0x00, 0xb6, 0xd9, 0xcd, 0xb2, 0x70, 0x08, 0x15, 0x72, 0xf6, - 0x46, 0xc7, 0x98, 0x7c, 0x1d, 0x54, 0xd0, 0x66, 0x2d, 0xa1, 0xd8, 0xda, - 0xb0, 0xe5, 0x9f, 0xa3, 0x2f, 0x2c, 0xfb, 0x34, 0xb3, 0x21, 0x8b, 0x61, - 0xf4, 0xce, 0x60, 0x2b, 0xb5, 0x5e, 0x3d, 0x14, 0x2c, 0xbe, 0x19, 0x9d, - 0x5f, 0x01, 0xe1, 0x21, 0x34, 0x11, 0x6b, 0x10, 0xd4, 0x17, 0x58, 0xb3, - 0x0a, 0x30, 0xe4, 0x17, 0x51, 0x0b, 0xf2, 0xbb, 0xa6, 0xb7, 0x00, 0xa2, - 0xe8, 0xa5, 0xa3, 0x41, 0x1d, 0x65, 0x2d, 0x26, 0x93, 0x26, 0x7d, 0xdc, - 0xad, 0x6f, 0x83, 0xeb, 0x66, 0x55, 0xde, 0x60, 0x21, 0x56, 0x19, 0x4f, - 0x9b, 0x7b, 0x26, 0x4a, 0x80, 0xf5, 0xab, 0x8b, 0xbf, 0xe4, 0xb1, 0xa1, - 0xd6, 0x33, 0x32, 0xbf, 0x86, 0x8c, 0x3c, 0xd0, 0x12, 0x03, 0xd4, 0xb9, - 0x23, 0x54, 0x1b, 0x94, 0x2f, 0xa5, 0x34, 0x4d, 0x59, 0x18, 0x33, 0x8e, - 0x8c, 0xf7, 0x1f, 0xc9, 0x6d, 0x75, 0xfb, 0x2a, 0x22, 0x6c, 0x64, 0xb7, - 0x79, 0xd8, 0x3b, 0xf6, 0x4e, 0x98, 0xd8, 0xa8, 0x2c, 0x06, 0xd1, 0x92, - 0x32, 0x44, 0xec, 0x38, 0x40, 0x3b, 0x53, 0x16, 0x40, 0x8f, 0x92, 0x72, - 0x87, 0xa8, 0xb8, 0xc0, 0x8f, 0x25, 0x4c, 0x4f, 0x24, 0xfc, 0x8d, 0xc6, - 0xa6, 0xeb, 0x2f, 0xdf, 0x2f, 0x0d, 0x2f, 0xd3, 0x6e, 0x70, 0x71, 0xfe, - 0xf0, 0x2e, 0xe9, 0x84, 0xd3, 0xc1, 0xd1, 0x70, 0x4b, 0x8f, 0x7b, 0x60, - 0xb0, 0xb7, 0xe3, 0x79, 0x52, 0x6a, 0x6b, 0x26, 0x03, 0x8f, 0x6a, 0x0f, - 0x8d, 0x85, 0xd7, 0x5f, 0xf7, 0x39, 0x31, 0x0e, 0x26, 0x73, 0x84, 0x3f, - 0x9b, 0x10, 0x6f, 0x29, 0x63, 0x14, 0x36, 0xa2, 0xec, 0x44, 0x7d, 0x84, - 0xc6, 0x4a, 0xec, 0xfe, 0xac, 0xcb, 0xe4, 0xfa, 0xf6, 0x68, 0x83, 0x68, - 0xe0, 0x8f, 0xd3, 0x8a, 0x60, 0x73, 0xf1, 0x5c, 0x71, 0x02, 0x0c, 0xa2, - 0x88, 0x2c, 0xa2, 0x35, 0x35, 0x5c, 0x3f, 0xb1, 0xbe, 0xb3, 0x6b, 0x5c, - 0xe1, 0x78, 0x75, 0x40, 0x20, 0x87, 0x67, 0xca, 0x07, 0x1c, 0x9c, 0x02, - 0xc7, 0xf2, 0x9d, 0x1c, 0xda, 0x1b, 0x86, 0x1b, 0xc6, 0xa6, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x93, 0xca, 0x30, 0xae, - 0xea, 0x26, 0x6a, 0x1b, 0x15, 0x46, 0x0a, 0xe3, 0x57, 0x23, 0x4c, 0x0c, - 0x98, 0x8e, 0x3e, 0xbb, 0x43, 0x14, 0x73, 0xdf, 0x17, 0x91, 0xe2, 0xee, - 0x39, 
0xf9, 0xc2, 0x2f, 0xdc, 0xad, 0x0e, 0x00, 0xf5, 0xdd, 0xe3, 0x97, - 0xba, 0x8c, 0xee, 0x53, 0xc4, 0x70, 0x37, 0x46, 0xcf, 0x04, 0xc3, 0xc8, - 0x56, 0x38, 0x2e, 0x39, 0x75, 0x32, 0x6d, 0x98, 0xc4, 0x14, 0xae, 0xa4, - 0x29, 0xa3, 0xc6, 0xb6, 0x66, 0x45, 0x48, 0xdf, 0xc0, 0xa9, 0x4b, 0x4f, - 0xef, 0xb9, 0xb4, 0x89, 0x0d, 0x64, 0x00, 0x5c, 0xd1, 0xc8, 0x2b, 0xf7, - 0xc5, 0x1a, 0x1b, 0x06, 0xb7, 0x49, 0xb1, 0xe3, 0x4d, 0x87, 0xf9, 0x3f, - 0xba, 0x39, 0xa3, 0x56, 0x7f, 0x43, 0xcc, 0x15, 0x9c, 0x3d, 0xba, 0x71, - 0x7b, 0xeb, 0x45, 0x0f, 0x15, 0x1b, 0x6c, 0x84, 0x75, 0x6d, 0x43, 0x0b, - 0x27, 0x12, 0x6b, 0xbc, 0x0a, 0x6d, 0xe4, 0xf6, 0x4f, 0xc7, 0xbb, 0x9e, - 0x91, 0xb5, 0x09, 0x5f, 0x79, 0x2a, 0xbf, 0xda, 0x34, 0x91, 0x44, 0x47, - 0x52, 0x64, 0x00, 0x89, 0x27, 0x17, 0x5c, 0xe9, 0x90, 0x8b, 0xcb, 0xbe, - 0x21, 0x47, 0x65, 0x1c, 0x54, 0x61, 0x48, 0x17, 0x66, 0xb7, 0xa1, 0x60, - 0x27, 0x31, 0x04, 0x42, 0x3b, 0x33, 0x3d, 0xda, 0xf7, 0x61, 0x3d, 0x4b, - 0x91, 0xa5, 0x74, 0x4b, 0xde, 0x16, 0xf2, 0x79, 0x3e, 0xf7, 0x89, 0x87, - 0xb3, 0xdd, 0xa2, 0x49, 0xd7, 0x54, 0x1b, 0x39, 0xff, 0xb5, 0xec, 0x9d, - 0x1d, 0x09, 0x7e, 0x5a, 0x3c, 0xd1, 0xdc, 0x0e, 0x2a, 0x0e, 0x2c, 0x40, - 0x4e, 0xa5, 0x8c, 0x9d, 0xc8, 0x9b, 0xa5, 0xb2, 0x40, 0xa4, 0xaa, 0x3b, - 0xac, 0x93, 0x19, 0xf7, 0xa1, 0x8b, 0xf8, 0x4a, 0x40, 0x08, 0x5d, 0x1d, - 0xb0, 0xae, 0x0f, 0x67, 0xa7, 0x21, 0xaf, 0xe3, 0xb1, 0xfc, 0xff, 0xa0, - 0x95, 0x66, 0x2b, 0xf7, 0x82, 0x2d, 0x8a, 0x26, 0x0f, 0xc3, 0xed, 0x62, - 0xb6, 0xcb, 0x4c, 0x86, 0xe9, 0x20, 0x78, 0x3f, 0x08, 0x53, 0x8f, 0x41, - 0xf1, 0xa1, 0x04, 0x77, 0xd9, 0xe6, 0xea, 0x26, 0x6d, 0x33, 0x48, 0xb3, - 0xbb, 0xed, 0xfc, 0xd7, 0xa3, 0x2b, 0xe2, 0x39, 0xcf, 0x78, 0x4e, 0x11, - 0x26, 0xad, 0x39, 0x83, 0x6e, 0x72, 0xbf, 0xc6, 0x34, 0x23, 0x97, 0x5d, - 0x7b, 0x64, 0x1e, 0x78, 0x00, 0x34, 0x92, 0x5d, 0x3f, 0x23, 0x28, 0x60, - 0x7f, 0x88, 0xf0, 0xca, 0x96, 0x4a, 0x15, 0xbf, 0x8a, 0xb7, 0xd0, 0xd9, - 0x99, 0x8b, 0xdb, 0x26, 0xdc, 0x7e, 0x8d, 0x35, 0x53, 0x60, 0x07, 0x85, - 0x80, 0xc4, 0x9c, 0x0d, 0x81, 0xe2, 0x93, 0x85, 0x76, 0x2d, 0x85, 0x21, - 0x6e, 0xda, 0x29, 0xe5, 0xb1, 0x08, 0x46, 0x09, 0x1b, 0x8a, 0xd9, 0xd2, - 0xd7, 0x16, 0x74, 0xee, 0x26, 0x3e, 0xc4, 0x8c, 0x2e, 0x6b, 0x0c, 0xbc, - 0x95, 0xea, 0x4a, 0xb2, 0xd6, 0x6f, 0x43, 0xd1, 0x3a, 0x8f, 0xbd, 0x77, - 0xb4, 0x67, 0x63, 0x6b, 0xd2, 0xe0, 0xf0, 0x81, 0x74, 0xb7, 0xc5, 0x11, - 0x60, 0x10, 0x6b, 0xc6, 0x0f, 0xfd, 0x84, 0x2e, 0x5c, 0x8f, 0x3b, 0xf5, - 0x68, 0xa7, 0x62, 0xc6, 0x4f, 0xa6, 0xee, 0x19, 0x44, 0xea, 0xc0, 0xe4, - 0x64, 0x12, 0x71, 0x2f, 0xfb, 0xa3, 0x4d, 0xb0, 0x8e, 0x5e, 0xe1, 0x79, - 0x65, 0xd4, 0xf3, 0xed, 0x73, 0x04, 0xf1, 0x6d, 0xc6, 0x75, 0x54, 0x28, - 0x13, 0xe2, 0xd6, 0xa1, 0x26, 0xf9, 0xa4, 0x29, 0x20, 0x5b, 0xd0, 0x3c, - 0x3d, 0xf3, 0x7a, 0x18, 0x9a, 0x3d, 0xec, 0x6a, 0x4c, 0xfd, 0xa5, 0x00, - 0xdf, 0xec, 0xfd, 0x64, 0x38, 0x66, 0xa7, 0xba, 0x59, 0xb3, 0x9b, 0x9c, - 0x44, 0xfb, 0x10, 0x08, 0xb8, 0x79, 0xea, 0x85, 0xbf, 0xa4, 0x14, 0xce, - 0xce, 0x85, 0x22, 0x3f, 0x16, 0x00, 0x1c, 0x57, 0xc8, 0x5a, 0x1b, 0xf5, - 0xff, 0xde, 0x7e, 0xa9, 0xcc, 0xf3, 0xb5, 0x1d, 0x57, 0x06, 0xda, 0xbb, - 0x6c, 0x0a, 0x1e, 0xd4, 0x09, 0x74, 0x84, 0x1d, 0xfa, 0xdf, 0x33, 0x1e, - 0xe2, 0x8f, 0x10, 0xf7, 0x73, 0xab, 0x71, 0xb8, 0x64, 0xce, 0xc0, 0x49, - 0xc0, 0x36, 0xd3, 0x39, 0x31, 0x4c, 0x12, 0x5b, 0xf3, 0xf9, 0xb4, 0x2c, - 0x88, 0xba, 0xd4, 0x1a, 0xbd, 0x0c, 0x99, 0xbd, 0x0e, 0xad, 0x51, 0xe0, - 0xca, 0xdb, 0x25, 0x66, 0x83, 0xe0, 0x55, 0x18, 0xeb, 0xa6, 0x4e, 0x56, - 0xcb, 0x2f, 0xa5, 0xf2, 0x42, 0x7a, 0xa1, 0x05, 0xf0, 0x3a, 0x71, 0x5a, - 0x78, 
0x3a, 0x7a, 0x6d, 0x12, 0x9f, 0x43, 0xc5, 0xcc, 0xb3, 0xfd, 0xf2, - 0xbf, 0x05, 0x16, 0xef, 0x07, 0xf9, 0xde, 0x0d, 0x51, 0xf0, 0x33, 0x86, - 0x43, 0x57, 0x40, 0xbc, 0xa9, 0xbd, 0xa0, 0x23, 0xff, 0xbb, 0xe6, 0x15, - 0xa1, 0xeb, 0xe9, 0x78, 0x0d, 0x72, 0x76, 0xf2, 0xb6, 0x6e, 0x46, 0xe2, - 0x86, 0xab, 0x3c, 0x52, 0x2c, 0xc6, 0x77, 0xdd, 0x57, 0xf7, 0x4d, 0x36, - 0xbb, 0x41, 0x08, 0x21, 0xaa, 0xe6, 0x44, 0x50, 0xed, 0xaf, 0x18, 0xb3, - 0xdd, 0x6b, 0x57, 0x46, 0x9e, 0x44, 0x93, 0x20, 0xe0, 0x62, 0x95, 0xcd, - 0xcf, 0xe4, 0x96, 0x92, 0xc3, 0x0d, 0x16, 0xb2, 0xc3, 0xf4, 0x0f, 0x3f, - 0x87, 0x17, 0xb9, 0x7b, 0x60, 0x60, 0xfa, 0xfb, 0x81, 0x5c, 0xb3, 0xb7, - 0x89, 0x73, 0xf7, 0x35, 0xf7, 0x27, 0xf1, 0x0e, 0xa4, 0xa1, 0xba, 0xea, - 0x6a, 0xe3, 0x5c, 0x0f, 0xf7, 0x15, 0xbc, 0x28, 0x57, 0x27, 0x8f, 0xd8, - 0xca, 0x82, 0x19, 0xd0, 0xa3, 0x9d, 0xe5, 0xe0, 0x44, 0xbf, 0x78, 0xa4, - 0x09, 0x69, 0x27, 0xa0, 0x69, 0xb5, 0xd4, 0xbe, 0x00, 0xe6, 0x03, 0x97, - 0xbc, 0x8b, 0xfc, 0x25, 0x70, 0xb3, 0x49, 0x30, 0xe3, 0x24, 0x19, 0x77, - 0xb4, 0x93, 0x46, 0x03, 0xe6, 0x22, 0xaf, 0x76, 0xd2, 0x90, 0x00, 0x05, - 0x46, 0xb8, 0xa4, 0xf5, 0x4c, 0xaa, 0x04, 0x63, 0xa0, 0x57, 0xe0, 0x20, - 0x6e, 0x1a, 0xed, 0x21, 0x86, 0xd0, 0x38, 0x5b, 0xe6, 0xa7, 0xb0, 0xe7, - 0x75, 0xe3, 0x76, 0xb3, 0x15, 0x8b, 0xdc, 0x10, 0x52, 0x15, 0x21, 0x7b, - 0xd0, 0xc4, 0x75, 0x26, 0x1d, 0x6e, 0x0d, 0x4c, 0x08, 0x5b, 0x95, 0x9a, - 0xd0, 0xda, 0xbe, 0x23, 0x98, 0xde, 0x60, 0x2a, 0xe9, 0xa4, 0x92, 0xf0, - 0x92, 0x84, 0xdc, 0x86, 0x60, 0xf5, 0x23, 0x31, 0xf5, 0xe9, 0xd6, 0x00, - 0xc1, 0x78, 0xab, 0x05, 0x94, 0xd3, 0x47, 0x4d, 0x32, 0x0f, 0x82, 0xa0, - 0x99, 0x0b, 0xfe, 0x6b, 0x58, 0xf9, 0x24, 0xf6, 0x17, 0xa0, 0x5f, 0x24, - 0x6a, 0xc6, 0x01, 0xa8, 0xfa, 0xca, 0xdc, 0xb6, 0x83, 0xcb, 0xd2, 0x3b, - 0xb7, 0x0b, 0x04, 0x3e, 0x6a, 0xaf, 0x23, 0x17, 0x3e, 0x14, 0xce, 0x52, - 0x1c, 0xe3, 0x06, 0x66, 0x29, 0x17, 0x6f, 0x7e, 0x66, 0x06, 0xa9, 0x68, - 0x7f, 0xca, 0xad, 0xa8, 0xb7, 0x2d, 0xa4, 0x5d, 0xa6, 0x16, 0xcd, 0xed, - 0xee, 0x14, 0x96, 0xc8, 0x12, 0x69, 0x4e, 0x70, 0x72, 0x2a, 0x75, 0x82, - 0x08, 0x3f, 0x3e, 0x27, 0xa0, 0xea, 0x43, 0x84, 0xa9, 0x9a, 0x91, 0x87, - 0x4f, 0x20, 0x61, 0x55, 0x8d, 0x70, 0xad, 0x6c, 0x59, 0x5d, 0x13, 0x80, - 0xbb, 0x52, 0x55, 0x81, 0x8b, 0x59, 0x94, 0x0f, 0xc2, 0x54, 0x79, 0x59, - 0xe8, 0x9d, 0x58, 0xe5, 0x91, 0x10, 0xb3, 0xef, 0x1c, 0xda, 0xaa, 0xdd, - 0x91, 0x0b, 0xb0, 0x14, 0x3b, 0xad, 0x02, 0x98, 0x40, 0x3c, 0x54, 0xc4, - 0x23, 0xb9, 0x40, 0x54, 0x7e, 0x88, 0x10, 0x3e, 0x24, 0xe5, 0xf6, 0xdf, - 0x5c, 0x9e, 0x7a, 0x9f, 0xd0, 0xff, 0x5e, 0x9c, 0xb6, 0x30, 0x17, 0x94, - 0xd2, 0xaa, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - 0x96, 0xff, 0x2f, 0x01, 0x60, 0x2c, 0x1b, 0xe3, 0xc6, 0xcb, 0xa4, 0x41, - 0xa1, 0x44, 0x13, 0x14, 0xe2, 0x44, 0x77, 0x1c, 0x96, 0xe8, 0xe6, 0x4f, - 0x70, 0x99, 0x3a, 0xef, 0xa1, 0x6f, 0x1f, 0x7f, 0xb9, 0xe9, 0x1e, 0x35, - 0x37, 0x5b, 0x94, 0x90, 0x78, 0xcc, 0x8d, 0xcd, 0x6c, 0x9f, 0xf6, 0x73, - 0xed, 0x23, 0xa2, 0x28, 0x64, 0x58, 0x50, 0x64, 0x05, 0xbc, 0xc9, 0x9b, - 0x5a, 0xec, 0x3f, 0x2b, 0x61, 0xcf, 0xa7, 0x35, 0x56, 0x8c, 0x77, 0x68, - 0xd6, 0xcf, 0x9b, 0xc5, 0x62, 0xee, 0x3a, 0xb2, 0xfe, 0x78, 0xba, 0x02, - 0xe7, 0x26, 0x8a, 0x89, 0x30, 0x19, 0xcc, 0xb0, 0x98, 0xbf, 0x30, 0x2c, - 0xae, 0x13, 0x6c, 0x93, 0x86, 0x19, 0x84, 0x13, 0x01, 0x2f, 0x39, 0x4e, - 0x33, 0xd1, 0x15, 0x99, 0xf7, 0x1e, 0xb8, 0x86, 0xdb, 0xb6, 0xf9, 0x56, - 0x42, 0x0e, 0x4a, 0xb1, 0x5e, 0xf0, 0x9a, 0x06, 0x5e, 0xab, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0xcd, 0xde, 0xad, 0x40, - 0x34, 
0xcd, 0x79, 0x0a, 0x29, 0x84, 0x05, 0x3f, 0xb5, 0xbe, 0x49, 0x84, - 0x43, 0xcc, 0xa6, 0xe3, 0xe9, 0xdc, 0x84, 0x14, 0xe7, 0xb3, 0x1b, 0x96, - 0xe8, 0xda, 0x35, 0x15, 0x38, 0xf5, 0xb3, 0xb5, 0x91, 0xc3, 0xc3, 0x94, - 0xc6, 0x79, 0xeb, 0xf5, 0x22, 0x78, 0xf0, 0x0b, 0xda, 0xb0, 0x91, 0xa7, - 0x43, 0x71, 0x8e, 0xa6, 0x52, 0x0f, 0x81, 0x06, 0xc8, 0xdf, 0xb5, 0x1f, - 0x92, 0xb0, 0xfe, 0x93, 0x38, 0x4c, 0xf4, 0x17, 0x66, 0x31, 0xea, 0x08, - 0x72, 0xb9, 0xaa, 0xfd, 0x40, 0x8d, 0xbf, 0x56, 0x19, 0xb1, 0xb5, 0x8e, - 0x4e, 0x4e, 0x73, 0x7f, 0x4b, 0x0c, 0x70, 0x94, 0x7c, 0x9f, 0xfc, 0x23, - 0x35, 0xba, 0xd2, 0x23, 0x88, 0x1d, 0x83, 0x28, 0x45, 0xd7, 0x1b, 0x63, - 0xfb, 0x36, 0x86, 0x06, 0xf3, 0x99, 0x81, 0x6e, 0xd7, 0xf1, 0xd4, 0x53, - 0x6d, 0x30, 0x3c, 0x8d, 0xac, 0xc6, 0x9a, 0xd5, 0xe8, 0x4f, 0x11, 0x58, - 0xba, 0xfd, 0x67, 0x06, 0xe7, 0x1a, 0xb4, 0xa1, 0x45, 0x13, 0xf2, 0x3b, - 0xdc, 0x71, 0xf0, 0xc6, 0x53, 0xfc, 0x8b, 0x2f, 0x14, 0xe4, 0xe0, 0xd6, - 0x8c, 0x96, 0x4c, 0x48, 0xc0, 0x30, 0x6e, 0x00, 0x0f, 0x42, 0xfe, 0xa7, - 0x9d, 0x0f, 0xf2, 0x52, 0x58, 0xf9, 0x35, 0x33, 0x99, 0xda, 0xd5, 0x9d, - 0x61, 0x26, 0x6b, 0x80, 0xff, 0x08, 0x51, 0x54, 0x26, 0xfa, 0x8d, 0xfc, - 0x67, 0x60, 0x93, 0x0e, 0xcd, 0x78, 0x41, 0x5a, 0x31, 0x47, 0x14, 0xb0, - 0x65, 0x89, 0x30, 0xcb, 0x0c, 0xc5, 0xa0, 0x37, 0xa8, 0xe0, 0xcf, 0x24, - 0xa4, 0x2f, 0xad, 0xa7, 0x9c, 0xa2, 0xe8, 0x81, 0x17, 0xbe, 0x2f, 0xd5, - 0xd1, 0xa8, 0xff, 0x9d, 0x5e, 0x7f, 0xd9, 0x6c, 0x56, 0xe6, 0xc4, 0x60, - 0x8d, 0xa5, 0x47, 0x5e, 0x43, 0x1e, 0x34, 0x23, 0xb3, 0x6a, 0xdf, 0x6c, - 0xf8, 0xd1, 0x85, 0x11, 0xaa, 0x74, 0x85, 0x71, 0x27, 0xc5, 0x80, 0x37, - 0x60, 0xb4, 0x2b, 0x53, 0x5a, 0xc4, 0x35, 0xd1, 0xe8, 0x4b, 0x01, 0x58, - 0x1f, 0xdb, 0x73, 0xf3, 0x2c, 0x8b, 0xbb, 0x17, 0x36, 0x76, 0x35, 0x6b, - 0xa0, 0x82, 0x47, 0xf5, 0x16, 0x21, 0x41, 0x43, 0xc9, 0x1f, 0x53, 0xf9, - 0xe9, 0x47, 0xf0, 0x9c, 0x6d, 0xe3, 0x23, 0x59, 0x74, 0xdc, 0x1a, 0x8f, - 0x4e, 0x6c, 0x71, 0x83, 0x7e, 0xd0, 0x2b, 0x50, 0x44, 0x86, 0x5f, 0xbf, - 0x60, 0x92, 0xeb, 0x9a, 0x9b, 0xa2, 0xc9, 0x2b, 0xa8, 0xc4, 0x77, 0x4e, - 0x3f, 0xf8, 0xa6, 0x39, 0x50, 0x5c, 0x7e, 0x2a, 0x70, 0xb0, 0x5d, 0x28, - 0xb2, 0x81, 0xa9, 0xaf, 0x16, 0x5e, 0x27, 0xeb, 0x03, 0x0e, 0x82, 0xad, - 0x28, 0x51, 0x16, 0xd1, 0xf4, 0x58, 0x75, 0x1a, 0xf9, 0x6a, 0xbf, 0x73, - 0xd7, 0x84, 0x07, 0x7f, 0x4c, 0x4e, 0x29, 0x02, 0x9b, 0x60, 0x81, 0x85, - 0xa9, 0xbf, 0xc7, 0xa0, 0x8f, 0x8a, 0xdc, 0xa4, 0xc5, 0x17, 0x51, 0x24, - 0x15, 0x28, 0x9e, 0x5e, 0x78, 0x84, 0x21, 0x02, 0xca, 0x26, 0x61, 0x4e, - 0x95, 0xa6, 0x8d, 0xa6, 0x98, 0x7d, 0x1f, 0x84, 0x19, 0x24, 0x8b, 0x31, - 0x76, 0x89, 0x2a, 0x5f, 0xa9, 0xfb, 0xaa, 0x8a, 0x8c, 0xce, 0xe4, 0x30, - 0xd6, 0xec, 0x5b, 0x39, 0xb7, 0x09, 0x80, 0x23, 0x4c, 0xe1, 0x6e, 0x8f, - 0x7c, 0x10, 0xe8, 0x8a, 0x60, 0x35, 0xd7, 0xa3, 0xe0, 0x5f, 0xcd, 0xfa, - 0x3d, 0x8f, 0xd8, 0x5d, 0xec, 0xc9, 0xc5, 0xa0, 0x73, 0x41, 0x89, 0xe5, - 0x39, 0xf2, 0x42, 0xff, 0x08, 0xa0, 0x12, 0xb7, 0x4a, 0x5e, 0x46, 0x06, - 0x31, 0xbd, 0x88, 0x5e, 0x9e, 0x05, 0x17, 0x51, 0xb3, 0xe7, 0x88, 0x10, - 0x19, 0x32, 0xff, 0x8a, 0x1e, 0xce, 0x66, 0xbc, 0x84, 0x1f, 0xed, 0x52, - 0x52, 0x77, 0xe1, 0x5e, 0xa6, 0x21, 0xe4, 0xad, 0x59, 0xca, 0xa3, 0x77, - 0xea, 0x66, 0x28, 0x15, 0x73, 0x3a, 0xfd, 0xe4, 0x75, 0x46, 0x99, 0x59, - 0x5c, 0x7a, 0x9b, 0x9d, 0x11, 0xb4, 0x76, 0x45, 0x06, 0x45, 0x41, 0x1e, - 0x94, 0xb7, 0xd9, 0xb8, 0xcb, 0xbf, 0x71, 0xec, 0xba, 0x9f, 0x4a, 0x1b, - 0xbc, 0xfd, 0x5c, 0x06, 0x64, 0xfd, 0x31, 0x52, 0xc0, 0xe4, 0xa7, 0x21, - 0x2f, 0x22, 0x92, 0xf0, 0x51, 0x33, 0x92, 0x1d, 0x40, 0x3c, 0x01, 0x81, - 0x3b, 
0xa8, 0x2e, 0x4e, 0xb6, 0x60, 0xcd, 0xd4, 0x36, 0x3b, 0x2e, 0x1d, - 0x5e, 0x43, 0xd9, 0x94, 0xf1, 0x51, 0xd3, 0x59, 0x94, 0x6a, 0xd5, 0x5f, - 0x1f, 0xd3, 0xa6, 0x55, 0xda, 0x15, 0xf1, 0x3e, 0x2c, 0x60, 0xb8, 0xc3, - 0xda, 0x0e, 0x56, 0x53, 0xea, 0xcd, 0x39, 0x27, 0x94, 0x86, 0x94, 0xb2, - 0x5b, 0xd8, 0x9a, 0x12, 0x94, 0xb0, 0xb6, 0x77, 0x28, 0xba, 0xde, 0xb6, - 0x60, 0x4d, 0x2b, 0x6e, 0x3d, 0xf6, 0xf1, 0x48, 0xf7, 0x77, 0xa1, 0x49, - 0xe0, 0x9f, 0x1e, 0xc9, 0xe6, 0xcb, 0x95, 0x26, 0x61, 0x5a, 0xc9, 0xed, - 0x49, 0x40, 0x17, 0x57, 0x15, 0xfc, 0x3c, 0xb8, 0x28, 0x79, 0xb8, 0x42, - 0x2a, 0xf9, 0xd4, 0x19, 0xb9, 0x5f, 0x41, 0xc2, 0x25, 0xd7, 0x88, 0x34, - 0xb3, 0x25, 0x4e, 0xca, 0xff, 0x9e, 0x59, 0x9a, 0x33, 0xc8, 0x12, 0xf9, - 0xd5, 0x70, 0xc0, 0x8b, 0x43, 0x13, 0xc4, 0x8d, 0x45, 0x99, 0xaa, 0xd7, - 0xeb, 0xb1, 0xe9, 0xb7, 0x5b, 0xab, 0x48, 0xd1, 0x26, 0x60, 0x8c, 0x13, - 0x55, 0x8a, 0x41, 0xd3, 0x68, 0x58, 0xd4, 0xa6, 0x30, 0x6e, 0x88, 0x3e, - 0x81, 0x6e, 0x61, 0x06, 0x13, 0x66, 0xd5, 0x8e, 0x5d, 0x87, 0x4f, 0xd9, - 0xb1, 0x66, 0xb3, 0xc5, 0x88, 0xa9, 0xc0, 0x73, 0xcb, 0x7f, 0x42, 0xec, - 0x96, 0x64, 0xad, 0x72, 0x85, 0x72, 0xaf, 0xeb, 0xa9, 0xc4, 0x17, 0x86, - 0xab, 0xe7, 0x23, 0xd7, 0x96, 0xf7, 0xb2, 0xb3, 0x51, 0xe1, 0x9a, 0x3b, - 0x0e, 0xaf, 0x89, 0xca, 0x7b, 0xf1, 0x70, 0x7b, 0xc7, 0x82, 0xfc, 0xc7, - 0x6c, 0x37, 0xd9, 0x7b, 0x82, 0x0f, 0x94, 0xcf, 0xd1, 0xa9, 0x33, 0xc2, - 0xa4, 0xab, 0xed, 0xad, 0xee, 0x64, 0x5d, 0x04, 0xf2, 0xcb, 0x8e, 0x99, - 0x22, 0x33, 0x69, 0x85, 0x85, 0xb6, 0x1a, 0x9b, 0x09, 0x18, 0xbe, 0xcd, - 0x63, 0xf6, 0x5d, 0x52, 0xbc, 0x26, 0x99, 0x3e, 0x52, 0xe5, 0x0c, 0xc5, - 0xee, 0xdd, 0xbb, 0x07, 0xbc, 0x38, 0xc1, 0x67, 0x96, 0x8c, 0xe6, 0xe4, - 0x18, 0xfa, 0x07, 0x91, 0x48, 0xef, 0x9c, 0x70, 0x9d, 0x5b, 0x1c, 0x0e, - 0xd5, 0xd3, 0x59, 0xee, 0x44, 0x13, 0xf7, 0x00, 0xa6, 0x20, 0xad, 0x65, - 0x1d, 0xb7, 0x96, 0x2f, 0x79, 0x7b, 0x04, 0xa3, 0x10, 0x90, 0x29, 0x8c, - 0xa3, 0x2e, 0x14, 0x39, 0xd3, 0xe4, 0x6e, 0x46, 0xf7, 0x6e, 0x96, 0x68, - 0xd9, 0xef, 0x45, 0xf7, 0x3c, 0xcd, 0xc7, 0xca, 0x33, 0x64, 0x8e, 0x31, - 0x80, 0x48, 0x7b, 0x7c, 0x81, 0x9a, 0x48, 0xff, 0xd5, 0x0d, 0x74, 0xe7, - 0x77, 0x46, 0x61, 0x9b, 0xde, 0xed, 0x83, 0xe9, 0x4f, 0x92, 0xc1, 0x16, - 0xad, 0x44, 0x40, 0x23, 0xce, 0x04, 0x31, 0xbf, 0xcf, 0xe2, 0x5a, 0x68, - 0x5a, 0xf4, 0x0f, 0xe1, 0x87, 0x79, 0xb0, 0x32, 0x0b, 0x09, 0x6b, 0x72, - 0x2b, 0x16, 0x06, 0x67, 0x82, 0x0b, 0x92, 0x35, 0xdb, 0x4c, 0xe2, 0x4a, - 0x60, 0x99, 0xaf, 0x52, 0x10, 0x4b, 0xa5, 0xcf, 0xac, 0x66, 0x49, 0x56, - 0x04, 0xc0, 0xd6, 0x6f, 0x62, 0x53, 0x6f, 0xcb, 0x62, 0xe9, 0xa5, 0xca, - 0x18, 0x8e, 0x86, 0x3f, 0x36, 0xfd, 0xea, 0x55, 0x16, 0x6d, 0x6c, 0x6a, - 0x8f, 0xa7, 0x9c, 0x70, 0x15, 0xd7, 0xf4, 0x57, 0x68, 0x04, 0x84, 0x60, - 0x3b, 0xb0, 0x32, 0xc4, 0xea, 0x9d, 0x70, 0xb9, 0xa6, 0x34, 0xe5, 0xfa, - 0xa1, 0x24, 0x54, 0x7f, 0xef, 0xac, 0xb4, 0x5f, 0xa0, 0xc0, 0x40, 0x3f, - 0x73, 0xdf, 0x56, 0xa6, 0xd9, 0x17, 0xf4, 0xff, 0x50, 0xae, 0x21, 0x0d, - 0x5a, 0xe0, 0xb0, 0xf9, 0x5b, 0x7a, 0x61, 0x6e, 0xa6, 0x85, 0x85, 0xbf, - 0x19, 0x03, 0xe2, 0x74, 0x1f, 0x03, 0x70, 0x76, 0x3c, 0xed, 0x02, 0x7d, - 0xfa, 0xf9, 0x1e, 0x17, 0xdd, 0x42, 0x30, 0xf0, 0x32, 0x47, 0x46, 0xae, - 0xf5, 0x64, 0xe6, 0x5e, 0x2b, 0x40, 0x86, 0x97, 0xb1, 0x24, 0x52, 0x69, - 0x67, 0x79, 0x8e, 0x0d, 0xcc, 0x07, 0xcb, 0x72, 0x29, 0xe9, 0xba, 0x2d, - 0xf7, 0xcb, 0xe3, 0x86, 0x06, 0xaa, 0x6d, 0x79, 0xf8, 0xb6, 0x93, 0x0a, - 0x9c, 0x97, 0xef, 0x47, 0x37, 0x13, 0x2e, 0x6b, 0xfd, 0x59, 0x0c, 0xc9, - 0x5e, 0x5e, 0xcd, 0x71, 0x6f, 0x99, 0x0d, 0x88, 0x9d, 0xbb, 0x7c, 0x2b, - 0x22, 
0xd5, 0xbe, 0xee, 0x26, 0x1c, 0xe1, 0xad, 0xc8, 0x4d, 0x5f, 0x6b, - 0xd1, 0xf4, 0x30, 0x4d, 0x46, 0x1d, 0x54, 0x11, 0x4b, 0xa0, 0x7f, 0x94, - 0x71, 0xc0, 0x44, 0x4a, 0x42, 0x11, 0xf5, 0x89, 0xec, 0xb5, 0x24, 0x45, - 0xf1, 0xf0, 0x30, 0x54, 0xf8, 0x62, 0xdb, 0x58, 0x3d, 0x7c, 0x2a, 0x82, - 0xe5, 0xbe, 0x13, 0xcf, 0xdc, 0x88, 0xfb, 0xd3, 0x1e, 0x4d, 0xa5, 0x3e, - 0xad, 0x95, 0xa2, 0xe6, 0x48, 0x73, 0xb2, 0xbe, 0x96, 0xef, 0x8e, 0x0b, - 0x28, 0xf9, 0xbe, 0x2a, 0xd6, 0x68, 0x9e, 0x9c, 0x7b, 0x5a, 0xaf, 0x20, - 0xf6, 0xa5, 0x3f, 0x99, 0x61, 0x57, 0xe8, 0x1c, 0xb2, 0xc3, 0xd0, 0x7f, - 0x2c, 0xb5, 0xe9, 0x66, 0x8e, 0x88, 0xec, 0x13, 0x51, 0xbc, 0x8e, 0xb6, - 0xe2, 0x91, 0xbf, 0x5e, 0x8c, 0x1c, 0xdd, 0x0e, 0x0a, 0x13, 0x06, 0xc6, - 0x62, 0x1c, 0x41, 0x8d, 0xa1, 0xc0, 0xf2, 0xfa, 0x76, 0x35, 0xaa, 0x77, - 0x06, 0x3f, 0x76, 0x50, 0xf6, 0x43, 0xf2, 0x25, 0x00, 0x79, 0xde, 0xca, - 0xa1, 0x06, 0x6f, 0xb4, 0x17, 0x4b, 0x99, 0x5a, 0x00, 0x32, 0xd6, 0xb0, - 0x1f, 0x80, 0x53, 0x16, 0xaa, 0x87, 0x72, 0xa2, 0x34, 0xaf, 0x90, 0x3d, - 0x60, 0xde, 0x0e, 0x6d, 0x83, 0xda, 0xb2, 0x11, 0x2f, 0x39, 0xdc, 0x1a, - 0xfe, 0x51, 0x74, 0x10, 0x3c, 0x41, 0xd5, 0x41, 0x65, 0x4a, 0xa0, 0x11, - 0xde, 0x95, 0x34, 0xef, 0xa0, 0xc9, 0xa8, 0xd3, 0xcb, 0xb9, 0x7d, 0x51, - 0x7d, 0xff, 0x26, 0x88, 0xd8, 0x29, 0x0e, 0xa0, 0xd4, 0xa7, 0x07, 0x33, - 0xe7, 0x7d, 0x59, 0x9f, 0x35, 0xc1, 0xb5, 0xf7, 0x78, 0x78, 0x84, 0xf0, - 0x20, 0x41, 0x3f, 0x02, 0x7d, 0x41, 0x90, 0x01, 0x8d, 0xa4, 0xd8, 0xd7, - 0xeb, 0x56, 0x7f, 0x38, 0xbc, 0x1e, 0x15, 0xdf, 0xfc, 0x34, 0xe7, 0x99, - 0xd4, 0x92, 0xd5, 0xf3, 0x9e, 0x16, 0x0b, 0x5c, 0xeb, 0xb6, 0x78, 0xac, - 0x84, 0x06, 0x8e, 0xfe, 0xd0, 0x7c, 0xce, 0x4a, 0x43, 0x49, 0x3b, 0xe1, - 0xab, 0x57, 0xc0, 0x12, 0xd6, 0x9d, 0xa4, 0xee, 0x91, 0x10, 0x81, 0xe2, - 0xfc, 0x02, 0x26, 0x7a, 0xca, 0x81, 0x5b, 0x2f, 0x34, 0x51, 0xdd, 0x25, - 0x4d, 0xc8, 0xf9, 0x3e, 0x59, 0x0f, 0x3d, 0x64, 0x51, 0xbf, 0x42, 0xc4, - 0x92, 0x9d, 0x8f, 0x39, 0x8a, 0x31, 0x09, 0x24, 0x19, 0x44, 0xc0, 0xf4, - 0xea, 0xca, 0x59, 0xcb, 0x86, 0x6c, 0x02, 0x7a, 0xe5, 0x30, 0x79, 0xe2, - 0x2c, 0x76, 0x08, 0x8f, 0x98, 0x0d, 0x4d, 0x12, 0xc3, 0x98, 0xb4, 0x24, - 0x04, 0x4f, 0x51, 0xec, 0x4e, 0xec, 0xbd, 0x8c, 0xc4, 0x79, 0x51, 0x7f, - 0xe1, 0xce, 0x76, 0x28, 0x0b, 0x7b, 0xc5, 0x3f, 0x5b, 0x48, 0x19, 0x76, - 0x68, 0x31, 0x8e, 0x28, 0xff, 0x18, 0x24, 0xe3, 0x91, 0xe7, 0x49, 0x0d, - 0x10, 0xbd, 0x00, 0xc6, 0x58, 0xfd, 0xb6, 0x88, 0x63, 0xbd, 0xb4, 0x4b, - 0xb8, 0xed, 0xdd, 0xb7, 0x53, 0xce, 0x89, 0xdb, 0x7f, 0xf4, 0xc3, 0x21, - 0x31, 0xad, 0x20, 0x78, 0x06, 0x71, 0xaf, 0xc0, 0xe3, 0xdc, 0xb8, 0xf4, - 0x80, 0xc8, 0x33, 0x1d, 0x8b, 0xff, 0x5a, 0x92, 0x68, 0x4d, 0xc1, 0x5b, - 0x58, 0x3e, 0xf6, 0x7f, 0xba, 0x42, 0xa5, 0x6d, 0xec, 0x03, 0x36, 0xc9, - 0x3f, 0x83, 0x1f, 0x0c, 0x33, 0x57, 0x6a, 0x43, 0x5f, 0x11, 0x72, 0x19, - 0x2c, 0xda, 0x71, 0x58, 0xf2, 0x50, 0x50, 0x06, 0x97, 0xd0, 0xdf, 0xd1, - 0x4f, 0x0b, 0x00, 0x1a, 0xea, 0x85, 0x3b, 0x37, 0x2f, 0xf0, 0x40, 0x52, - 0xd9, 0x2a, 0xe8, 0x54, 0xa5, 0xee, 0x0f, 0x49, 0x74, 0x39, 0x96, 0x5d, - 0x60, 0x8f, 0x14, 0x59, 0x86, 0x59, 0x86, 0xfb, 0x67, 0x71, 0x5c, 0x26, - 0x5f, 0xe9, 0xab, 0x32, 0x77, 0x83, 0xdf, 0x02, 0x19, 0x85, 0xae, 0x4d, - 0x7d, 0x9c, 0x8d, 0x4f, 0x61, 0x05, 0x3c, 0x0c, 0xc6, 0x74, 0x9e, 0x36, - 0x33, 0xb8, 0x14, 0x85, 0xab, 0xa2, 0x0b, 0x5d, 0x22, 0xf2, 0x50, 0x3e, - 0xa4, 0x88, 0xac, 0x67, 0xf9, 0x06, 0xe5, 0x30, 0x8e, 0xf9, 0x67, 0x34, - 0xd5, 0x94, 0x5b, 0x35, 0xb7, 0x3d, 0x39, 0x5f, 0x4e, 0xae, 0xfe, 0xf7, - 0x57, 0xd3, 0x95, 0x7b, 0x0a, 0xd9, 0x92, 0x4a, 0x66, 0x29, 0xa0, 0x18, - 0x35, 
0x54, 0x14, 0x44, 0x79, 0x72, 0xc3, 0xbc, 0xa8, 0x1a, 0xd3, 0xa3, - 0xbe, 0x6f, 0x9e, 0xcc, 0x68, 0xb6, 0x5f, 0xd4, 0x42, 0xab, 0xe8, 0x09, - 0x60, 0x57, 0x2e, 0xb2, 0x9a, 0x5b, 0x62, 0x38, 0xfb, 0x0a, 0x35, 0x9c, - 0x4f, 0xf7, 0xe0, 0xd2, 0x06, 0x04, 0x1f, 0x79, 0x7f, 0xa7, 0x7b, 0xd3, - 0x63, 0xc9, 0xbd, 0x16, 0x58, 0x38, 0x7b, 0xaa, 0x08, 0xf3, 0x14, 0x6c, - 0x25, 0xf8, 0xa5, 0xe9, 0x4b, 0x45, 0x34, 0x89, 0x76, 0x74, 0xcb, 0x41, - 0x9c, 0x2a, 0xd9, 0xca, 0xb3, 0x12, 0x46, 0x6d, 0x85, 0x4d, 0x63, 0x2d, - 0x24, 0x1b, 0x19, 0x6b, 0x3f, 0x61, 0x6b, 0x4b, 0x15, 0x83, 0x2d, 0x8f, - 0x61, 0xab, 0xd1, 0x55, 0x93, 0x4e, 0x26, 0xd6, 0x7a, 0x0a, 0x8a, 0xff, - 0x58, 0x44, 0xf7, 0x39, 0x31, 0x1a, 0xab, 0xa6, 0x98, 0x31, 0x41, 0x03, - 0xb6, 0xc9, 0xf5, 0x50, 0xe3, 0x7b, 0xc0, 0x59, 0x74, 0x60, 0x91, 0xb4, - 0x79, 0x02, 0x25, 0xc1, 0xb5, 0xbd, 0xcb, 0x6e, 0x40, 0x61, 0xfe, 0x68, - 0x29, 0x83, 0x1b, 0xd2, 0x49, 0xe1, 0x31, 0xde, 0xdd, 0x53, 0xb0, 0xb8, - 0x96, 0xa2, 0xce, 0xea, 0x8b, 0x66, 0x2c, 0x5a, 0x80, 0x51, 0x0b, 0xc1, - 0x2d, 0x9a, 0xfa, 0x9d, 0xc6, 0xcc, 0x2b, 0xbb, 0xaa, 0xce, 0x98, 0xaa, - 0x26, 0x15, 0x8f, 0x4a, 0xe7, 0xdb, 0x17, 0x6c, 0xe5, 0x58, 0xc9, 0xae, - 0xe4, 0x9c, 0x1d, 0xab, 0x59, 0x84, 0x3e, 0x27, 0x76, 0x03, 0xe3, 0x82, - 0x64, 0x6f, 0x6e, 0x6f, 0x63, 0xd2, 0x12, 0x84, 0xe3, 0x9b, 0x9d, 0x7e, - 0x53, 0x1a, 0x54, 0x8d, 0xc1, 0xf0, 0x94, 0xae, 0xad, 0x8f, 0x6a, 0x12, - 0x4e, 0xa7, 0x30, 0xdb, 0x55, 0xbe, 0x09, 0xe2, 0x56, 0x08, 0xc4, 0x3a, - 0xb0, 0x55, 0xb0, 0x24, 0x96, 0xa6, 0x3e, 0x28, 0xd0, 0x35, 0xfb, 0x58, - 0x47, 0xba, 0x2d, 0x51, 0xbb, 0x72, 0x20, 0x59, 0xd2, 0xdd, 0x9c, 0xe2, - 0xb5, 0x31, 0x90, 0xac, 0x74, 0x5d, 0x9f, 0x3d, 0x8c, 0x1c, 0x96, 0xc0, - 0x60, 0x61, 0xa8, 0xbb, 0x3c, 0xb3, 0x6d, 0x6d, 0x92, 0x4a, 0xca, 0xbb, - 0x60, 0x5e, 0x82, 0x0d, 0x7f, 0xab, 0x4b, 0x36, 0x4c, 0x93, 0x0d, 0x88, - 0x71, 0xaf, 0xb6, 0x53, 0xb0, 0x38, 0xb4, 0x1c, 0xb4, 0x7b, 0xd4, 0x13, - 0x32, 0x6c, 0xe4, 0xee, 0x6a, 0xb3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0x88, 0x83, 0x91, 0x4c, 0x2e, 0x1e, 0xbe, 0xa4, - 0xb5, 0x96, 0xff, 0x67, 0x50, 0xe9, 0x81, 0x0e, 0x5d, 0x0e, 0xad, 0xc4, - 0x1f, 0xeb, 0x98, 0x38, 0xcc, 0x54, 0x9d, 0x27, 0xa6, 0xf1, 0x37, 0x23, - 0xce, 0xb4, 0x5b, 0xff, 0x12, 0xb1, 0xb8, 0x35, 0x5e, 0x03, 0x02, 0x04, - 0xad, 0xa6, 0x6f, 0x43, 0xfc, 0xe4, 0xbe, 0x0c, 0xe0, 0x93, 0xd5, 0xef, - 0x09, 0xfa, 0x04, 0xe9, 0x5a, 0x22, 0xd4, 0x81, 0xc1, 0x27, 0x4f, 0x5f, - 0x6e, 0x83, 0x5a, 0x8a, 0x2d, 0xbb, 0x8f, 0xa4, 0x91, 0xcc, 0x82, 0x37, - 0x3b, 0x14, 0x98, 0x58, 0x86, 0x44, 0xb7, 0xa9, 0x58, 0xf3, 0x3d, 0x49, - 0x71, 0x7a, 0x37, 0xcd, 0xc5, 0xb9, 0xc9, 0x46, 0xd5, 0xd4, 0x17, 0x60, - 0x1a, 0xbf, 0x93, 0xa9, 0xe9, 0x08, 0x25, 0x40, 0xd1, 0x65, 0xae, 0xdd, - 0x85, 0xa6, 0xcc, 0x06, 0xca, 0x91, 0xe1, 0x63, 0xf9, 0x6b, 0x15, 0xa8, - 0x04, 0x61, 0xd2, 0xa6, 0x59, 0x21, 0x1a, 0x1c, 0xc9, 0xa9, 0xa9, 0xc8, - 0x54, 0x86, 0xac, 0xa5, 0xd6, 0x95, 0x39, 0x83, 0x4b, 0x6b, 0x69, 0xa6, - 0x94, 0xd8, 0xc0, 0xfb, 0x66, 0x0f, 0x3a, 0xbe, 0xc7, 0xf3, 0xcc, 0xd5, - 0xb7, 0x1b, 0x60, 0x02, 0x95, 0x45, 0x4a, 0x12, 0xc9, 0xfe, 0x75, 0x7c, - 0x1b, 0xb2, 0x86, 0x96, 0x28, 0x07, 0xa2, 0x18, 0x7a, 0x6c, 0x90, 0x6f, - 0x32, 0x0c, 0xc8, 0x34, 0xbc, 0x75, 0x4d, 0x96, 0x03, 0xa6, 0x0f, 0x3d, - 0x35, 0x1b, 0x64, 0x76, 0x95, 0x55, 0xff, 0x25, 0xd4, 0x71, 0xcf, 0x8a, - 0x73, 0x6d, 0x9b, 0x74, 0xfe, 0xff, 0x9e, 0x31, 0x9e, 0x5e, 0x89, 0x5a, - 0x1a, 0xeb, 0x8d, 0x06, 0x3b, 0xf2, 0xf6, 0x06, 0x5d, 0xc3, 0xba, 0x04, - 0xca, 0x0f, 0x07, 0x2c, 0xbd, 0x54, 0x52, 0xd9, 0x1c, 0x2f, 0x0e, 0x13, - 0x5e, 
0x25, 0x13, 0xe5, 0xd7, 0x8e, 0x19, 0x42, 0x1b, 0x52, 0x2e, 0xd2, - 0x8f, 0xc5, 0x8e, 0x1c, 0x34, 0x2e, 0x4d, 0xd5, 0x51, 0x7d, 0x91, 0x64, - 0xbc, 0xb4, 0x0d, 0xc9, 0xe7, 0x1c, 0x6c, 0x47, 0xe9, 0xbb, 0x67, 0x9a, - 0x96, 0xde, 0xad, 0xff, 0xba, 0x35, 0x25, 0x6d, 0x57, 0xa1, 0x93, 0xfe, - 0xe2, 0x8d, 0x02, 0xeb, 0xf0, 0x2f, 0x54, 0xfd, 0x46, 0xc0, 0x8f, 0xea, - 0x32, 0x7b, 0x57, 0xda, 0xe0, 0x29, 0x1c, 0x19, 0xba, 0xa4, 0xa6, 0x1c, - 0x6e, 0xeb, 0x7a, 0xa8, 0x8a, 0xe1, 0xc6, 0x12, 0xf5, 0xa3, 0x24, 0x1a, - 0x96, 0xe1, 0x02, 0xc0, 0xf4, 0x7d, 0x14, 0x72, 0xd6, 0x12, 0x8e, 0x6c, - 0x8c, 0xd2, 0xfd, 0x88, 0x78, 0x48, 0xf3, 0x74, 0x38, 0x86, 0x04, 0x68, - 0x6d, 0x7c, 0xf4, 0x4c, 0x40, 0x17, 0xf6, 0x8f, 0xb2, 0x6c, 0xd7, 0x66, - 0x66, 0x3b, 0x38, 0xa1, 0xbb, 0x1e, 0xff, 0x72, 0x1f, 0x64, 0x56, 0xc2, - 0x53, 0x1c, 0x6f, 0x84, 0x2b, 0xbd, 0x23, 0xd9, 0xb4, 0x6b, 0x87, 0x79, - 0x99, 0xec, 0x81, 0x8d, 0x1a, 0x58, 0x00, 0xf0, 0x2c, 0xc1, 0xc4, 0x57, - 0x74, 0x0f, 0xce, 0x32, 0xe2, 0x5e, 0xae, 0x02, 0x1c, 0xe8, 0x94, 0xc6, - 0x44, 0xaa, 0x7b, 0x9a, 0x32, 0xb5, 0x33, 0xac, 0xfc, 0x41, 0x65, 0xf2, - 0xca, 0xcc, 0xc6, 0x74, 0x36, 0xb2, 0xc9, 0x0e, 0x26, 0x73, 0xae, 0x68, - 0x98, 0xa4, 0x36, 0xe8, 0x98, 0x39, 0xad, 0x05, 0x3f, 0xca, 0x12, 0xcc, - 0x86, 0xfd, 0xc6, 0x57, 0xf0, 0x02, 0x4e, 0x45, 0xcb, 0x54, 0x34, 0xdd, - 0x66, 0x26, 0xab, 0xda, 0x95, 0xa5, 0x85, 0xec, 0x02, 0x03, 0xb6, 0x29, - 0x30, 0x11, 0x40, 0x54, 0x9a, 0x6a, 0x87, 0x2e, 0x97, 0xa1, 0x7e, 0xeb, - 0x34, 0x39, 0x78, 0x3b, 0xbc, 0x5f, 0x8e, 0xc5, 0x0e, 0x21, 0x29, 0x4b, - 0xb7, 0x1b, 0xe7, 0x14, 0x08, 0x34, 0xb7, 0x9a, 0x0a, 0xb2, 0x6c, 0x25, - 0x76, 0xb5, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xe2, 0x7d, 0x48, 0xdd, 0x1a, 0xcb, 0xb6, 0x5c, 0x6f, 0xbe, 0x32, 0x9d, - 0xd2, 0x2b, 0x9e, 0x10, 0x65, 0xd7, 0x1e, 0xec, 0xc8, 0xb5, 0x10, 0x64, - 0x8f, 0x5d, 0xef, 0xfe, 0x9b, 0x6c, 0x9b, 0x02, 0x6a, 0x6d, 0xf7, 0x98, - 0x7b, 0xf7, 0x17, 0xfd, 0x49, 0x1b, 0x6a, 0xc5, 0x3c, 0xa0, 0xfc, 0xa8, - 0x94, 0x95, 0xed, 0x48, 0x81, 0x04, 0x53, 0x8c, 0xbe, 0xe4, 0x4e, 0xaf, - 0xc1, 0x9d, 0xc3, 0xdf, 0xc2, 0xb5, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xae, 0xb0, 0x67, 0x5b, 0x99, 0x26, 0x07, 0xfb, - 0x6c, 0x98, 0xfe, 0xbb, 0x35, 0xf1, 0x5b, 0x02, 0xc6, 0x03, 0xfc, 0x97, - 0x21, 0x16, 0x8d, 0x48, 0xd4, 0x4f, 0x03, 0xd9, 0x7c, 0x9f, 0xa6, 0x1e, - 0x6f, 0x5a, 0x58, 0x17, 0x6d, 0x26, 0xb4, 0xc5, 0x4c, 0xe5, 0x93, 0x0a, - 0x9c, 0xb2, 0x40, 0xbc, 0x60, 0xc7, 0x2b, 0xdb, 0x3b, 0xc0, 0x3c, 0x5c, - 0x44, 0x4b, 0xdd, 0x58, 0xbe, 0xdc, 0xc5, 0xb5, 0x6a, 0xf9, 0x5e, 0x73, - 0x07, 0x58, 0x8f, 0x45, 0x7b, 0xac, 0xba, 0x82, 0x96, 0x49, 0x4d, 0x22, - 0x70, 0x7a, 0x3d, 0x69, 0x26, 0x8b, 0x88, 0x13, 0xf1, 0x8d, 0xfc, 0xdf, - 0x73, 0xd5, 0x20, 0x3c, 0x52, 0x92, 0x16, 0xb1, 0x6e, 0xb7, 0x41, 0xbe, - 0x23, 0x9b, 0x51, 0xf7, 0xc9, 0x38, 0x8a, 0xc7, 0x6e, 0x68, 0x82, 0xd1, - 0x59, 0x50, 0x09, 0x4b, 0x44, 0x3b, 0x28, 0x06, 0x60, 0x75, 0x7a, 0xe5, - 0xa1, 0x36, 0xbb, 0x62, 0x44, 0xe3, 0xd0, 0x68, 0x14, 0xea, 0xad, 0xf9, - 0x18, 0xcc, 0xd5, 0x42, 0x5d, 0x18, 0x53, 0xe6, 0x4a, 0xfe, 0xde, 0x32, - 0xe1, 0xe7, 0xf8, 0x8c, 0x9d, 0x35, 0xf4, 0x4a, 0xcb, 0x23, 0x2f, 0x91, - 0xb5, 0xb0, 0xb2, 0x01, 0x5c, 0x22, 0x8c, 0x42, 0x42, 0xd5, 0xf0, 0x82, - 0x6f, 0x9f, 0x64, 0xe5, 0x99, 0x4d, 0x36, 0x0b, 0xfc, 0x78, 0x38, 0x30, - 0x47, 0x8f, 0x0b, 0x57, 0x86, 0x4f, 0x1b, 0xc9, 0x05, 0x0e, 0x08, 0xc4, - 0xf4, 0xab, 0x9e, 0x90, 0xb4, 0x4f, 0x36, 0x54, 0xe8, 0xa1, 0x3f, 0x90, - 0xd2, 0xf3, 0xb4, 0xb4, 0xdd, 0xf3, 0x43, 0x2f, 0xc4, 0x43, 0xbb, 0x99, - 0x8e, 
0xb8, 0x61, 0x59, 0x5e, 0xfa, 0x1b, 0x3c, 0xc1, 0xeb, 0x9d, 0x35, - 0x62, 0x34, 0x82, 0x45, 0xef, 0x41, 0xe9, 0xfc, 0x35, 0xae, 0xb4, 0x0b, - 0xce, 0x52, 0x5b, 0x40, 0x7d, 0xdd, 0x86, 0x83, 0x52, 0x74, 0x77, 0x11, - 0xc2, 0x9b, 0x8c, 0xa3, 0x63, 0xc2, 0x2d, 0xdd, 0x8c, 0x76, 0x13, 0xc5, - 0xc0, 0xde, 0x3e, 0x6b, 0xe1, 0x0f, 0xeb, 0x0f, 0x0a, 0x25, 0x41, 0x2f, - 0x8b, 0x4a, 0x98, 0x30, 0xcb, 0x1a, 0x43, 0xa3, 0xc1, 0xcc, 0x44, 0x9a, - 0x6c, 0xdc, 0x92, 0x40, 0xc4, 0x7a, 0x1f, 0x8a, 0x6f, 0x74, 0xf3, 0xf5, - 0x52, 0x72, 0xf7, 0x81, 0x6e, 0x74, 0x75, 0xe6, 0xea, 0xd9, 0x57, 0x91, - 0xae, 0xf2, 0x3f, 0x35, 0x4b, 0x99, 0xd9, 0x3f, 0x85, 0xe0, 0x92, 0xaa, - 0x35, 0xac, 0x28, 0xbf, 0x43, 0xb8, 0xad, 0xc7, 0xc5, 0xf6, 0x15, 0x2f, - 0x7c, 0xfb, 0x34, 0x48, 0xf3, 0x04, 0x12, 0xf4, 0x2f, 0x92, 0x74, 0xc8, - 0xea, 0xbc, 0x24, 0x6e, 0x3b, 0x0e, 0x9e, 0xf0, 0xaf, 0x02, 0x97, 0x95, - 0xbc, 0x90, 0x7f, 0xc4, 0xf8, 0xe2, 0x04, 0x9a, 0x8f, 0xfc, 0xbc, 0x50, - 0xfe, 0xf7, 0x89, 0x17, 0x2c, 0xdb, 0xd6, 0x5e, 0xbf, 0xd9, 0x8e, 0x89, - 0x8b, 0x06, 0x1d, 0x0b, 0x81, 0x2a, 0x55, 0x5c, 0x5f, 0xb6, 0xa6, 0xa5, - 0xd2, 0xaa, 0x79, 0x9c, 0x39, 0x31, 0x76, 0x03, 0x98, 0x42, 0xd6, 0xb7, - 0x37, 0x1f, 0xc8, 0x51, 0x8a, 0x1c, 0x5d, 0xcd, 0x9c, 0x78, 0xa4, 0x22, - 0x6e, 0x12, 0x10, 0x0a, 0x33, 0xc9, 0xe0, 0xfe, 0xfc, 0xe8, 0x15, 0xe7, - 0xef, 0xd8, 0x6d, 0xc7, 0xc9, 0xc2, 0x8e, 0x18, 0x82, 0x2f, 0xa6, 0x09, - 0x8a, 0xdc, 0x41, 0x6b, 0x89, 0xea, 0xd9, 0xd6, 0x96, 0xfd, 0xba, 0x6e, - 0xae, 0x2d, 0x0c, 0xf9, 0x3c, 0x4c, 0x1a, 0xfa, 0x98, 0x83, 0x51, 0x45, - 0x9d, 0x1e, 0xa5, 0xc1, 0x81, 0x54, 0x37, 0x5d, 0x28, 0xca, 0xa6, 0xfe, - 0x48, 0xf4, 0x77, 0x17, 0x92, 0x1d, 0x0c, 0xb3, 0x39, 0x77, 0x22, 0xd9, - 0xc7, 0xc2, 0xaf, 0x70, 0x0a, 0xd3, 0xa6, 0x57, 0x69, 0xfb, 0xb9, 0xe0, - 0xc4, 0x73, 0x7a, 0x68, 0xee, 0x27, 0x6e, 0x3a, 0x6e, 0xae, 0x32, 0xf6, - 0x09, 0xb3, 0x0b, 0x40, 0x72, 0xc6, 0x26, 0x6e, 0xc5, 0x88, 0x6b, 0xce, - 0x99, 0x88, 0x60, 0x6f, 0x6e, 0xa9, 0xe6, 0xd7, 0x35, 0x5e, 0x3b, 0x36, - 0x0d, 0x14, 0xb8, 0x2f, 0xde, 0x67, 0xc8, 0x2e, 0x52, 0xc1, 0xf1, 0x58, - 0x87, 0x32, 0x2a, 0x52, 0x21, 0x27, 0x1e, 0x04, 0xed, 0xc4, 0x82, 0xd7, - 0xeb, 0x85, 0x12, 0x3e, 0xea, 0xd0, 0x07, 0xa0, 0x80, 0x48, 0xe9, 0xbd, - 0x9b, 0x3a, 0x8e, 0x8b, 0xa0, 0xfc, 0x07, 0xf0, 0x69, 0x4e, 0xc7, 0x1d, - 0xd9, 0x9a, 0x73, 0x18, 0x63, 0xb8, 0xe6, 0x4a, 0xa0, 0x81, 0xf0, 0xdb, - 0xb9, 0x88, 0xf4, 0x2b, 0x1f, 0x0d, 0xda, 0x31, 0xc0, 0xb0, 0x55, 0x79, - 0x56, 0x48, 0x22, 0xbb, 0x49, 0x7f, 0xb1, 0xf1, 0xf6, 0x6f, 0x42, 0xd3, - 0xba, 0x68, 0x3a, 0x8f, 0xe7, 0xac, 0x53, 0x30, 0x96, 0xec, 0x51, 0x7d, - 0xfc, 0xc0, 0x35, 0xe9, 0x59, 0xe7, 0x0e, 0xed, 0x29, 0x46, 0x50, 0x3c, - 0x4b, 0x36, 0xc6, 0x2a, 0xaa, 0x3b, 0xbe, 0xce, 0xd3, 0xda, 0x4d, 0x65, - 0xb0, 0xe8, 0x52, 0x68, 0xf0, 0x23, 0xde, 0x02, 0x77, 0xb3, 0xcc, 0xce, - 0x78, 0xdd, 0x8c, 0xf8, 0xbe, 0x5d, 0x0d, 0xa9, 0xb6, 0x96, 0x85, 0xbf, - 0x92, 0x2a, 0x6b, 0x1b, 0xe8, 0x76, 0x05, 0x13, 0x30, 0xd8, 0x3d, 0x80, - 0xaa, 0xa2, 0xa3, 0xbc, 0x07, 0xba, 0x9c, 0x75, 0x5b, 0x42, 0x03, 0xd8, - 0xde, 0x42, 0x44, 0xf7, 0x29, 0x43, 0x29, 0x0d, 0x48, 0x2b, 0x02, 0xd0, - 0xcc, 0xe9, 0x17, 0x47, 0x23, 0x73, 0x6d, 0xc5, 0x91, 0x6d, 0x4e, 0xc5, - 0xcf, 0xc3, 0x58, 0xaf, 0x6e, 0xa2, 0x9e, 0xe7, 0xe1, 0x88, 0xac, 0x62, - 0xff, 0xbc, 0x69, 0x57, 0xad, 0x0f, 0x08, 0xf8, 0x32, 0xfd, 0x79, 0xcb, - 0x30, 0xbc, 0xd2, 0xe5, 0x20, 0xd9, 0x0f, 0xd1, 0x33, 0xbf, 0xe4, 0x49, - 0x7a, 0x2b, 0x5c, 0xb3, 0x63, 0x13, 0x4d, 0xed, 0x17, 0xe7, 0x5b, 0xf4, - 0x36, 0x9d, 0x3c, 0x4e, 0x51, 0xb2, 0xf7, 0xf2, 0xcd, 0xfb, 0xec, 0x42, - 0x79, 
0x46, 0xae, 0x18, 0x50, 0xdf, 0xbf, 0x5b, 0xb1, 0x9a, 0x49, 0x22, - 0xae, 0xe9, 0xf3, 0x86, 0x3f, 0xe0, 0xb4, 0xc6, 0x9c, 0x08, 0xd6, 0xd9, - 0xf4, 0x68, 0xbb, 0x33, 0x0e, 0x59, 0x3d, 0x76, 0xf0, 0xd7, 0x54, 0x04, - 0x19, 0x66, 0xee, 0x61, 0x11, 0x0d, 0x48, 0x10, 0x21, 0x16, 0x7c, 0xac, - 0x49, 0xab, 0xe0, 0x19, 0x85, 0x93, 0x48, 0x65, 0x7c, 0x5e, 0x6c, 0x1a, - 0xf5, 0xb0, 0xc6, 0x80, 0xa1, 0x2a, 0xd5, 0x71, 0x42, 0xec, 0x2f, 0x25, - 0xf7, 0xb8, 0x84, 0xcd, 0xf0, 0x5c, 0xcd, 0xee, 0x44, 0xcb, 0xeb, 0x74, - 0x96, 0x3c, 0xb0, 0x56, 0xcb, 0xaf, 0x7e, 0x9e, 0x4a, 0x12, 0x06, 0xae, - 0x57, 0x43, 0x2d, 0xb2, 0x11, 0x96, 0x05, 0xdb, 0xb3, 0x1a, 0x01, 0xa7, - 0x1d, 0x02, 0x81, 0x1c, 0x36, 0x41, 0x65, 0xf0, 0x67, 0xd6, 0xd0, 0x0f, - 0xec, 0x34, 0x7d, 0xd3, 0x89, 0xac, 0x60, 0x67, 0x95, 0x81, 0x84, 0xe7, - 0xbb, 0x9a, 0x59, 0x36, 0x3b, 0xde, 0xa4, 0x88, 0xda, 0xf2, 0xd2, 0xa2, - 0x0c, 0xba, 0xfb, 0x93, 0xbf, 0xc8, 0xad, 0xe8, 0x57, 0xa0, 0x2b, 0xbb, - 0x4e, 0xa9, 0x38, 0xe7, 0x86, 0x6b, 0x95, 0x34, 0x24, 0x96, 0xc0, 0x09, - 0xd9, 0xfd, 0x5f, 0x1c, 0x93, 0xd9, 0x72, 0xfa, 0xc4, 0x14, 0x72, 0x9c, - 0x19, 0x6f, 0xee, 0x12, 0x17, 0xee, 0x65, 0xb4, 0x8c, 0x83, 0x39, 0x3c, - 0x0f, 0xbf, 0x25, 0xcf, 0xee, 0x05, 0x8c, 0x6a, 0x56, 0x18, 0xf0, 0x20, - 0x72, 0xc1, 0xbf, 0xe4, 0xce, 0x37, 0xbf, 0x2b, 0xba, 0x70, 0x1e, 0xc2, - 0xc8, 0xcd, 0x58, 0xb9, 0x60, 0xc7, 0xfb, 0xd0, 0xce, 0xb9, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x7c, 0x63, 0x50, 0x90, - 0xcb, 0x9c, 0xce, 0x59, 0xb1, 0x47, 0xb0, 0x49, 0x9b, 0xfc, 0xfb, 0x3d, - 0x3d, 0x62, 0xcf, 0x58, 0x4c, 0x2a, 0x79, 0xf0, 0x72, 0x7f, 0x81, 0x41, - 0xac, 0x82, 0x2d, 0xa9, 0xf0, 0x0e, 0x4d, 0xd2, 0xe0, 0xbd, 0xca, 0x17, - 0xb7, 0x59, 0x9f, 0xdb, 0xfe, 0x51, 0x90, 0x88, 0xb9, 0xeb, 0x4e, 0xac, - 0x80, 0x30, 0x64, 0xc4, 0x49, 0xd1, 0xb6, 0x65, 0x67, 0xef, 0x9d, 0x5c, - 0x04, 0xe0, 0x9d, 0xbe, 0x47, 0x75, 0x9b, 0x6e, 0x30, 0x76, 0xad, 0x37, - 0x9a, 0x56, 0xff, 0xcd, 0x40, 0x26, 0x3e, 0xe2, 0x7d, 0x30, 0x55, 0x09, - 0x92, 0x25, 0x36, 0x2f, 0xf8, 0x55, 0xb8, 0x9b, 0x66, 0x49, 0x41, 0x9d, - 0x78, 0x6d, 0x3f, 0x54, 0x41, 0x01, 0x93, 0x9c, 0x5e, 0x0c, 0x4a, 0x38, - 0x79, 0x76, 0xb4, 0x98, 0xae, 0xf9, 0x99, 0x21, 0x05, 0x6a, 0xfb, 0xbc, - 0x44, 0xf7, 0xdc, 0x85, 0x5e, 0x5f, 0x18, 0x49, 0x22, 0x11, 0x6d, 0xa5, - 0x9e, 0x6b, 0x59, 0x60, 0xf8, 0x73, 0x8b, 0xcb, 0x38, 0xbb, 0xc9, 0xbf, - 0x49, 0x0e, 0x57, 0x65, 0x48, 0x41, 0x41, 0xa2, 0x40, 0x67, 0x91, 0x1d, - 0x54, 0xac, 0xa7, 0xef, 0x16, 0x8b, 0xc7, 0xd1, 0xe6, 0xdb, 0xc5, 0x9c, - 0xd4, 0x04, 0x67, 0xd8, 0x75, 0x21, 0x2b, 0x1d, 0x11, 0xc1, 0x79, 0x45, - 0xb4, 0x91, 0x7a, 0x97, 0x00, 0xde, 0xc6, 0xc5, 0x8a, 0xd1, 0xd7, 0xea, - 0xc1, 0x22, 0xe1, 0x58, 0x61, 0xf2, 0x89, 0x3d, 0xdb, 0x04, 0x3d, 0xe4, - 0xe9, 0xe7, 0xbf, 0x4b, 0x99, 0x8a, 0xc6, 0xf2, 0x09, 0xc4, 0xe2, 0x6d, - 0x0b, 0xda, 0x13, 0xfb, 0xff, 0xbf, 0x0b, 0xfc, 0x78, 0x33, 0xb8, 0x7b, - 0x3e, 0xd8, 0xba, 0x27, 0xba, 0xae, 0xdf, 0xce, 0xea, 0x80, 0x08, 0x38, - 0xd8, 0x33, 0x00, 0xa9, 0xb6, 0x88, 0x48, 0xa9, 0x3b, 0x54, 0xf0, 0x95, - 0xda, 0xba, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0xb1, 0xd7, 0x8d, 0x6c, 0xb9, 0x96, 0xdc, 0x64, 0x9b, 0x0c, 0x74, 0x54, - 0x59, 0x82, 0xf6, 0x6e, 0x7c, 0x4e, 0x23, 0x83, 0x04, 0x2e, 0x49, 0xfb, - 0x56, 0x4b, 0xcd, 0x0d, 0x76, 0x29, 0xb1, 0xce, 0x40, 0xa3, 0xd0, 0x02, - 0x16, 0x8e, 0x1c, 0x0a, 0x00, 0x5b, 0x8c, 0x06, 0xf9, 0x07, 0x97, 0x12, - 0x0c, 0x33, 0xd5, 0x48, 0x6d, 0xae, 0x7d, 0x2c, 0x8f, 0x74, 0x32, 0x24, - 0xcf, 0x91, 0xd7, 0xbe, 0xb2, 0x05, 0xcf, 0x2f, 0x93, 0xd5, 0x43, 0x90, - 0xce, 
0x02, 0x97, 0xf8, 0x51, 0xb3, 0xba, 0x56, 0x5d, 0x94, 0x41, 0xa4, - 0x11, 0xf3, 0x21, 0xc0, 0xcc, 0x28, 0xf8, 0x5a, 0x00, 0x0a, 0xd4, 0x53, - 0xdd, 0xac, 0xfe, 0x25, 0x03, 0xea, 0x2b, 0x6b, 0x9d, 0x7e, 0x1a, 0xe1, - 0x5f, 0x5c, 0xa7, 0x47, 0xa2, 0x72, 0x4f, 0x92, 0x60, 0x25, 0x7c, 0x1c, - 0xa5, 0x34, 0xa6, 0x86, 0x0e, 0xda, 0x8f, 0x3f, 0xec, 0xe2, 0xe4, 0xad, - 0xa9, 0x41, 0xcc, 0x3d, 0x94, 0x43, 0xfd, 0x28, 0xd8, 0xb0, 0x0f, 0x05, - 0x9e, 0x2b, 0x27, 0x3f, 0xe0, 0x84, 0xbc, 0x9e, 0x7a, 0xa5, 0x83, 0x3d, - 0x3b, 0xac, 0x83, 0xd3, 0x16, 0x92, 0x8c, 0xd2, 0x4a, 0x81, 0xdd, 0xba, - 0x0a, 0xb7, 0xc5, 0x9f, 0x83, 0x0f, 0x78, 0xb8, 0xab, 0x2d, 0xca, 0xf8, - 0x6c, 0x06, 0xd7, 0x82, 0xb8, 0x61, 0x7d, 0x2a, 0x31, 0x3a, 0x39, 0x97, - 0x5f, 0xc7, 0x00, 0x6e, 0x46, 0xf2, 0xc5, 0x12, 0x71, 0x55, 0x5b, 0x10, - 0xaf, 0xbb, 0x07, 0x4c, 0x2f, 0xa3, 0x51, 0x53, 0x22, 0x20, 0xab, 0xed, - 0x02, 0x95, 0xc6, 0x5f, 0xaa, 0xb8, 0xc0, 0xcb, 0xe5, 0xe0, 0x25, 0x97, - 0xf7, 0xda, 0x1d, 0xd8, 0x5a, 0xff, 0x76, 0x0c, 0x3e, 0x33, 0x1b, 0x7a, - 0x15, 0xb8, 0x34, 0x75, 0xcf, 0xe9, 0xf3, 0x53, 0x61, 0x03, 0x2d, 0x52, - 0x29, 0x69, 0x3a, 0xc3, 0xd9, 0x22, 0xc0, 0x2d, 0x80, 0xed, 0x66, 0xc4, - 0xf4, 0x89, 0x60, 0x14, 0xdb, 0xec, 0x7d, 0xcc, 0x99, 0x5c, 0x94, 0x27, - 0xab, 0xed, 0xd2, 0x17, 0xf4, 0x36, 0xfc, 0x7e, 0x99, 0x98, 0xb6, 0x86, - 0xb6, 0x7c, 0x54, 0xd6, 0xec, 0xb5, 0xad, 0x62, 0xcc, 0xb0, 0xf7, 0x8c, - 0x52, 0x99, 0xf2, 0x44, 0x27, 0x3a, 0xb0, 0xff, 0x8f, 0x09, 0xae, 0xe1, - 0x61, 0xd8, 0x9f, 0xdd, 0x2f, 0x6b, 0xea, 0xd0, 0x12, 0x70, 0x8c, 0x9d, - 0x8f, 0x4c, 0x36, 0x98, 0x1e, 0x2e, 0xb5, 0x50, 0x63, 0x33, 0x9c, 0x4b, - 0xc3, 0xd4, 0xa0, 0xe6, 0x96, 0x96, 0x75, 0xfd, 0x8a, 0xc4, 0x0c, 0xa7, - 0xea, 0x9d, 0xf1, 0x23, 0x9e, 0x38, 0xff, 0x1a, 0x67, 0x36, 0x5f, 0x5f, - 0x17, 0x88, 0x1a, 0x43, 0x25, 0xea, 0x76, 0xb5, 0xcd, 0xce, 0x43, 0xf8, - 0x71, 0x2b, 0xdb, 0xf0, 0xcd, 0x76, 0xbd, 0x94, 0x57, 0xdb, 0x77, 0xcd, - 0xb2, 0x8f, 0xd1, 0xc0, 0xeb, 0x00, 0x61, 0x7f, 0x66, 0xb0, 0x43, 0x6e, - 0xe0, 0x9f, 0x11, 0x0e, 0x65, 0xf7, 0x4e, 0x00, 0x74, 0xc3, 0xeb, 0xb1, - 0xeb, 0x0c, 0x24, 0x5d, 0x15, 0x56, 0x16, 0x47, 0x87, 0xcf, 0x34, 0xbe, - 0x2a, 0xdd, 0x77, 0x55, 0xa4, 0x09, 0x15, 0x79, 0x8c, 0xaa, 0xce, 0x32, - 0x90, 0x9b, 0x16, 0x40, 0x94, 0x7f, 0x19, 0x27, 0xbc, 0xbf, 0x45, 0x4b, - 0xa5, 0xf0, 0xd0, 0x9e, 0x5b, 0xb9, 0x46, 0x6e, 0x72, 0x8f, 0x49, 0x3b, - 0x7a, 0xc1, 0x92, 0xb0, 0xd5, 0x25, 0x1b, 0x0b, 0xf3, 0xd0, 0x8a, 0x47, - 0x8b, 0xbe, 0xa4, 0xf9, 0x6a, 0x09, 0x84, 0x9a, 0x5b, 0x5b, 0xea, 0xbb, - 0x6f, 0xd8, 0xaf, 0xcd, 0x67, 0x9b, 0x79, 0x7c, 0x8f, 0xcc, 0xd7, 0x5f, - 0x3a, 0xc3, 0xd0, 0xb7, 0xba, 0x28, 0x83, 0x81, 0x4a, 0x05, 0x51, 0xaf, - 0xa0, 0x52, 0x34, 0xe3, 0x4f, 0xec, 0x82, 0xdc, 0x97, 0xd8, 0x69, 0xb2, - 0x0d, 0x68, 0x35, 0x87, 0x58, 0xc0, 0xcf, 0x58, 0x0d, 0xf6, 0x6b, 0x6d, - 0x2a, 0xc0, 0x72, 0xe4, 0x90, 0x8c, 0x7b, 0x45, 0xba, 0xf1, 0x13, 0x6f, - 0x8c, 0xd2, 0xdd, 0xc5, 0x8e, 0xc8, 0xec, 0xf9, 0xfb, 0xde, 0xe5, 0xaa, - 0xcb, 0xc0, 0xff, 0x77, 0x2d, 0x99, 0xb1, 0x69, 0x7f, 0xe3, 0x38, 0x61, - 0x35, 0xb6, 0x45, 0xdd, 0x73, 0x45, 0x84, 0x89, 0x1b, 0x96, 0x7e, 0x6a, - 0x1d, 0xd9, 0xe6, 0x76, 0xa8, 0x16, 0x0f, 0x42, 0xc9, 0x41, 0xec, 0x5d, - 0x25, 0x01, 0xb0, 0x45, 0xa6, 0xaa, 0x69, 0x87, 0x11, 0xa1, 0xb8, 0x9e, - 0x68, 0x48, 0x68, 0xe9, 0xb5, 0xc2, 0xff, 0x83, 0x8f, 0x71, 0xb9, 0xd7, - 0xbb, 0xae, 0x59, 0x8b, 0x1b, 0x4c, 0x44, 0xd8, 0xe3, 0xce, 0xab, 0x88, - 0xfb, 0x64, 0xd9, 0x61, 0x5a, 0x7d, 0xce, 0x3a, 0x27, 0xb5, 0xa3, 0xfd, - 0x5d, 0xa3, 0xb8, 0xa1, 0x15, 0x63, 0x0b, 0x75, 0x39, 0xc3, 0xa4, 0xfb, - 0x60, 
0x53, 0xfd, 0x11, 0x21, 0x35, 0x0f, 0x19, 0x28, 0x14, 0xcd, 0x8a, - 0xcf, 0x33, 0xaa, 0x4f, 0x6a, 0x1e, 0x56, 0x87, 0xd5, 0x6e, 0x43, 0x9b, - 0xa3, 0x72, 0x95, 0x8c, 0x34, 0xa2, 0xac, 0x11, 0x76, 0x95, 0xd7, 0xdd, - 0xbf, 0x10, 0xf4, 0x0f, 0x2a, 0x64, 0xd2, 0x4d, 0x7b, 0xc6, 0x9b, 0x7d, - 0xf7, 0xa5, 0xb3, 0x84, 0x9a, 0x9a, 0x5e, 0xcf, 0x7f, 0x95, 0x6d, 0x44, - 0xd1, 0xb2, 0x19, 0xbb, 0xed, 0x37, 0x42, 0x4b, 0x4b, 0x6d, 0xb7, 0x10, - 0x02, 0x5f, 0x00, 0x1f, 0x24, 0xce, 0xb2, 0x8b, 0x3e, 0x7d, 0xc6, 0x6e, - 0x6c, 0x90, 0x75, 0xad, 0x3f, 0x9d, 0x63, 0x04, 0x76, 0x20, 0x7a, 0x56, - 0x48, 0xa1, 0x6a, 0x37, 0x74, 0xd2, 0xb7, 0x4f, 0xa3, 0x64, 0x62, 0xaa, - 0xce, 0x75, 0x8c, 0x15, 0x75, 0x79, 0xa0, 0xbd, 0xdd, 0x01, 0x46, 0xca, - 0xa0, 0x31, 0x1a, 0x16, 0x1f, 0xef, 0x8b, 0xc6, 0x54, 0x57, 0xfa, 0x6e, - 0x43, 0xdf, 0xb0, 0x99, 0xed, 0xa4, 0xcb, 0xeb, 0x91, 0x35, 0x14, 0x0c, - 0xa9, 0x1d, 0xb5, 0xa9, 0x32, 0x99, 0xe3, 0x89, 0x74, 0xaa, 0xa4, 0x65, - 0x1e, 0x82, 0x47, 0xfa, 0x37, 0x23, 0xe5, 0x86, 0xb6, 0xc0, 0xb6, 0x89, - 0x9a, 0xd9, 0xae, 0x29, 0x39, 0x7b, 0x66, 0xc7, 0x5b, 0x02, 0x08, 0x86, - 0xd4, 0xf0, 0x75, 0xc2, 0x05, 0x86, 0xc3, 0x75, 0xd2, 0x2a, 0x1e, 0xec, - 0x6e, 0x75, 0x29, 0x58, 0x8c, 0x25, 0x3b, 0x95, 0x21, 0xde, 0x42, 0xd5, - 0xb7, 0x15, 0x30, 0x09, 0x49, 0x78, 0x55, 0xd5, 0xf2, 0x30, 0x80, 0x93, - 0x8a, 0xce, 0x84, 0x27, 0xdb, 0x4a, 0x09, 0x30, 0x0c, 0x7f, 0x4d, 0xd1, - 0x0f, 0xda, 0x66, 0x58, 0xe1, 0x01, 0xfd, 0x75, 0x83, 0xf5, 0x39, 0x2e, - 0xe2, 0x6b, 0xde, 0xff, 0x20, 0x8a, 0xf7, 0xcc, 0x81, 0x8e, 0x99, 0xb4, - 0xeb, 0x76, 0x74, 0x38, 0x2b, 0xe0, 0x6d, 0x61, 0x8f, 0x39, 0x59, 0x10, - 0x7d, 0xb5, 0xd3, 0x14, 0x96, 0x04, 0x1d, 0x22, 0x89, 0xef, 0x15, 0x7c, - 0x28, 0x5a, 0xd6, 0x8d, 0xf3, 0xb7, 0x6a, 0x9a, 0xce, 0x21, 0x77, 0xfd, - 0x4f, 0x22, 0x26, 0x28, 0xb8, 0xb5, 0xb3, 0x73, 0xfd, 0x2a, 0x7b, 0x42, - 0x26, 0x77, 0x41, 0x93, 0xed, 0xf9, 0x8f, 0xa9, 0x92, 0xd5, 0x9f, 0x2e, - 0x60, 0xec, 0x60, 0x98, 0xf1, 0xd5, 0x11, 0xe2, 0xe0, 0xd7, 0x45, 0xa7, - 0xe4, 0xf2, 0x82, 0x61, 0x2f, 0x41, 0x1b, 0xd9, 0x8e, 0x78, 0xd5, 0x6b, - 0x68, 0x74, 0xf0, 0xc3, 0x83, 0x01, 0x16, 0x60, 0x6e, 0x34, 0x88, 0x45, - 0x8a, 0x86, 0x44, 0x5b, 0xa5, 0xa8, 0x55, 0xbc, 0xfa, 0x8f, 0xbd, 0x93, - 0x95, 0x3f, 0xab, 0x19, 0x54, 0x8f, 0x06, 0x8e, 0xca, 0x0b, 0x4a, 0x18, - 0x3f, 0x7a, 0x9c, 0x3f, 0xe6, 0xbe, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x81, 0x32, 0x41, 0x46, 0x59, 0x26, 0xf4, 0xef, - 0x93, 0x9f, 0x04, 0xc2, 0x67, 0x13, 0x32, 0x45, 0xc0, 0x79, 0x70, 0x27, - 0x21, 0x2b, 0xaf, 0x35, 0xf3, 0xc4, 0x88, 0x52, 0x28, 0xea, 0xca, 0x8a, - 0x08, 0x01, 0x6f, 0x61, 0xab, 0x10, 0xa3, 0xf0, 0x6b, 0x3b, 0x54, 0x64, - 0xf1, 0x63, 0x83, 0x38, 0x2b, 0x26, 0x18, 0x5a, 0x67, 0xc4, 0x67, 0x38, - 0x3f, 0x2c, 0x9a, 0xc9, 0x48, 0x33, 0x77, 0xb4, 0xb2, 0xc2, 0xc7, 0x08, - 0x21, 0x5e, 0xc4, 0x19, 0x59, 0xe1, 0xfa, 0x32, 0xa4, 0x4c, 0x3e, 0xba, - 0x65, 0x92, 0x98, 0x39, 0x71, 0x2f, 0x99, 0x08, 0xf8, 0xb3, 0x7a, 0x03, - 0x53, 0xd7, 0x68, 0xb2, 0x5e, 0xb0, 0xef, 0xe0, 0x1e, 0x7d, 0xb2, 0x23, - 0x5d, 0x2b, 0xd7, 0x09, 0xa6, 0x78, 0xa4, 0x7c, 0x08, 0xed, 0x8a, 0xf6, - 0x96, 0xa0, 0x10, 0x17, 0x62, 0x8b, 0x8a, 0xa0, 0xac, 0x22, 0x67, 0x02, - 0xa8, 0x66, 0x1a, 0xb5, 0x02, 0xde, 0xa5, 0xfa, 0x69, 0x29, 0x5f, 0x24, - 0x89, 0x46, 0x68, 0xd6, 0x51, 0x2a, 0xfe, 0x88, 0xf0, 0x40, 0xde, 0xd1, - 0x12, 0x2e, 0xed, 0x13, 0x7b, 0x49, 0xf6, 0xe1, 0x7a, 0xcf, 0x61, 0xcb, - 0x70, 0x9d, 0xaa, 0x51, 0x07, 0xc2, 0x54, 0x76, 0x89, 0x29, 0x94, 0x29, - 0x8b, 0x0e, 0xf5, 0xe8, 0x81, 0xc7, 0xdb, 0x59, 0x1e, 0x75, 0xda, 0x6a, - 0x94, 
0x18, 0x16, 0xae, 0xbb, 0x43, 0x87, 0x56, 0x66, 0x8b, 0x84, 0xe9, - 0xa9, 0xd0, 0xd2, 0x8f, 0x5b, 0xbf, 0x1d, 0x24, 0x3a, 0xb7, 0x64, 0xff, - 0xe9, 0x22, 0x21, 0x65, 0xaf, 0x2b, 0x45, 0x8d, 0x28, 0xea, 0xbc, 0x07, - 0x10, 0x6e, 0xfb, 0x4d, 0x6f, 0x35, 0xe5, 0xeb, 0x5d, 0x29, 0x72, 0xe1, - 0x94, 0xad, 0xed, 0x25, 0xd7, 0x39, 0x63, 0x32, 0x37, 0x0b, 0xb2, 0xd7, - 0x54, 0x1f, 0xe4, 0x0d, 0xe7, 0xb3, 0xd1, 0xa6, 0x2a, 0xcf, 0x8e, 0x97, - 0xf1, 0xa8, 0xfc, 0xb1, 0x61, 0xdc, 0xb4, 0x8f, 0x29, 0xa2, 0x68, 0x4a, - 0xe6, 0x2f, 0x8a, 0x69, 0x2c, 0xa1, 0x1d, 0xe2, 0x9e, 0x65, 0x71, 0xb7, - 0x83, 0xef, 0x63, 0xf5, 0x36, 0xdc, 0xa0, 0x94, 0x5a, 0x45, 0x8a, 0x85, - 0x5e, 0x28, 0x86, 0x21, 0xd2, 0xbf, 0x7a, 0x2f, 0x76, 0x1c, 0x2a, 0x15, - 0xb2, 0xe8, 0xaf, 0x63, 0x37, 0xbe, 0xd8, 0x0a, 0xef, 0x54, 0xee, 0xe6, - 0xd9, 0xb3, 0xdb, 0x41, 0x55, 0xba, 0xd8, 0x14, 0x7c, 0x10, 0x61, 0x06, - 0x40, 0x45, 0x69, 0x37, 0x60, 0xf7, 0x6a, 0x7a, 0x23, 0x70, 0x30, 0x57, - 0x3e, 0xe5, 0x12, 0x24, 0xbc, 0x5e, 0x82, 0x89, 0xd8, 0x37, 0xc9, 0x33, - 0xb9, 0x38, 0xa5, 0xba, 0xed, 0xdd, 0x93, 0x58, 0x81, 0x15, 0xec, 0x15, - 0x70, 0x2f, 0x30, 0xfa, 0xaf, 0xf7, 0xf5, 0xcb, 0x41, 0x74, 0xea, 0xc0, - 0x91, 0xbe, 0x53, 0x4c, 0xc2, 0x74, 0x1b, 0x5b, 0x8c, 0x74, 0xd8, 0xc3, - 0x4a, 0x12, 0xaa, 0x57, 0xd6, 0x61, 0xb1, 0xb8, 0x81, 0x5d, 0x81, 0x37, - 0x1e, 0x5b, 0x3d, 0x5a, 0xbc, 0xa6, 0xb2, 0x27, 0xe3, 0x01, 0x4c, 0xf0, - 0xad, 0x7b, 0xdf, 0x50, 0xf9, 0xd7, 0xb7, 0xcc, 0xa8, 0x5c, 0x3d, 0x9a, - 0xb7, 0x60, 0x3e, 0x63, 0x3f, 0x6a, 0x08, 0x0b, 0x82, 0xdc, 0x3e, 0xfa, - 0x24, 0x33, 0xd3, 0x01, 0xbf, 0xef, 0xeb, 0x52, 0x3f, 0x91, 0x61, 0xda, - 0xe2, 0x26, 0x10, 0xdf, 0xe4, 0x9b, 0x77, 0x91, 0x22, 0xc5, 0x4e, 0x9c, - 0x0b, 0x32, 0xff, 0x27, 0x85, 0x85, 0x0c, 0x99, 0x50, 0x8f, 0xad, 0x5d, - 0x06, 0x18, 0x52, 0xb4, 0x64, 0x09, 0xc4, 0xa4, 0x84, 0xd4, 0x81, 0x07, - 0x0a, 0x97, 0x55, 0xf8, 0x96, 0x52, 0xb2, 0x9a, 0xf4, 0x06, 0x2c, 0x9a, - 0x3b, 0x8b, 0xaa, 0x67, 0x18, 0x3a, 0xee, 0xbc, 0xca, 0x8f, 0x46, 0xf6, - 0x4a, 0x33, 0x5b, 0x56, 0x09, 0xb2, 0x72, 0x87, 0xdb, 0xbb, 0x57, 0x67, - 0x53, 0x82, 0x77, 0x31, 0x66, 0xbb, 0xf1, 0x33, 0x6d, 0x55, 0x82, 0xaa, - 0x80, 0xd4, 0x4d, 0xb8, 0xab, 0xbd, 0x2a, 0xda, 0x10, 0x3a, 0xc8, 0xf0, - 0x14, 0x1e, 0xcb, 0x8e, 0x76, 0x6c, 0xc8, 0x74, 0x05, 0xb3, 0x51, 0xbd, - 0x63, 0x06, 0x69, 0x05, 0x2a, 0x21, 0xd6, 0x2f, 0xe4, 0x38, 0xae, 0xf8, - 0xd4, 0xe9, 0xa7, 0xe8, 0xc8, 0x5a, 0x65, 0x7d, 0x54, 0x34, 0x33, 0x0d, - 0xf6, 0x07, 0xd6, 0x8c, 0xe5, 0x72, 0x9b, 0xfb, 0x60, 0x49, 0xd2, 0xaf, - 0xb4, 0x17, 0xc4, 0x74, 0x8d, 0xe5, 0x54, 0xda, 0x96, 0x56, 0x7d, 0x97, - 0x62, 0xe8, 0xec, 0x0d, 0x2b, 0x02, 0x2e, 0x59, 0xf8, 0xa1, 0x06, 0x6a, - 0xb6, 0x3e, 0x15, 0xeb, 0x64, 0x1a, 0x48, 0x3d, 0x53, 0x2c, 0x42, 0x3b, - 0x97, 0xa1, 0x3f, 0x47, 0x8b, 0x74, 0x87, 0x8b, 0x96, 0x63, 0x08, 0x4c, - 0x99, 0x38, 0x5a, 0xb6, 0x93, 0xa8, 0xcc, 0xee, 0x62, 0x3a, 0x00, 0x6d, - 0x5c, 0xab, 0x77, 0x3c, 0x46, 0xae, 0x6e, 0xeb, 0xf1, 0xf9, 0x63, 0xf1, - 0xa2, 0x31, 0x21, 0x38, 0xc3, 0x4f, 0xe2, 0x3a, 0x33, 0x7f, 0xe7, 0xc6, - 0x69, 0xd5, 0x1c, 0x7e, 0x5b, 0x4f, 0xb1, 0x50, 0x3b, 0xbe, 0x31, 0xa7, - 0x42, 0xa3, 0x97, 0x7b, 0xe3, 0x90, 0xd0, 0x07, 0xfd, 0x05, 0xb9, 0xf2, - 0x47, 0xc4, 0xc8, 0xdd, 0x1c, 0x3c, 0xa4, 0x22, 0x96, 0x04, 0xca, 0x28, - 0x17, 0xcc, 0x5c, 0x49, 0x7e, 0xc6, 0x93, 0x98, 0xd3, 0x8b, 0xd2, 0xf6, - 0x4a, 0xb6, 0xbe, 0x8d, 0xa2, 0xdd, 0xb6, 0x7c, 0x66, 0x0c, 0x29, 0xcb, - 0x1d, 0x98, 0xf6, 0xe4, 0xe5, 0x30, 0x4c, 0x84, 0xbf, 0x6f, 0x71, 0x4e, - 0xc2, 0x12, 0x9f, 0x35, 0xd6, 0xf8, 0xc6, 0x30, 0xe9, 0x9e, 0x1a, 0x8a, - 0x2f, 
0xd1, 0x96, 0xb3, 0x3c, 0x0f, 0xf5, 0x78, 0xa7, 0xe0, 0xbd, 0x4b, - 0xe0, 0xd8, 0x3d, 0x57, 0xa5, 0x44, 0xa0, 0xd9, 0x10, 0x79, 0xd2, 0x10, - 0x50, 0xc7, 0x77, 0x73, 0x09, 0xf8, 0xb4, 0xcf, 0x66, 0xe3, 0x0c, 0xfb, - 0x96, 0xf8, 0x52, 0xb3, 0x7e, 0x44, 0xf0, 0x03, 0x54, 0xd4, 0xa2, 0x57, - 0x38, 0x8a, 0x96, 0xfc, 0x7c, 0x4c, 0x9f, 0x3a, 0xf2, 0xa2, 0x48, 0xbb, - 0x3e, 0xd1, 0x11, 0x2c, 0xab, 0xdf, 0x53, 0x96, 0xac, 0x58, 0x33, 0xb9, - 0xdd, 0xd2, 0x4f, 0x8a, 0x0a, 0x89, 0x0e, 0xd3, 0x6f, 0x58, 0x8c, 0xa1, - 0x0a, 0x0b, 0xa7, 0xd7, 0x1f, 0x0a, 0x70, 0xe3, 0x43, 0x12, 0x56, 0xb8, - 0x6c, 0xf8, 0x75, 0x4e, 0x2b, 0xb0, 0x17, 0x29, 0xe4, 0x95, 0x85, 0xd8, - 0x85, 0x95, 0x63, 0x55, 0xa8, 0x82, 0xf0, 0xe7, 0x7d, 0xf3, 0xf1, 0x78, - 0x66, 0xd1, 0x92, 0x71, 0x99, 0xad, 0x30, 0x94, 0xe9, 0x54, 0x2c, 0xe1, - 0x57, 0xf3, 0x6a, 0xe6, 0x0c, 0x5e, 0xc7, 0x58, 0xba, 0xb7, 0x61, 0xd3, - 0x74, 0x72, 0x96, 0x06, 0x0b, 0x01, 0x3d, 0xc2, 0xa1, 0xb4, 0x38, 0x81, - 0x19, 0x44, 0xbc, 0x84, 0x52, 0x22, 0xc9, 0x67, 0x81, 0x99, 0xfb, 0x0a, - 0xc2, 0xff, 0x50, 0x67, 0xbe, 0x38, 0x5e, 0x13, 0x16, 0x60, 0x83, 0x35, - 0xb9, 0x2f, 0xa9, 0x55, 0xbb, 0x30, 0x6b, 0x19, 0xfc, 0x2a, 0x40, 0x24, - 0x74, 0x20, 0x57, 0x78, 0xb9, 0x55, 0xb7, 0x70, 0x86, 0x65, 0x43, 0x1c, - 0x76, 0x2e, 0x91, 0x83, 0x5e, 0x33, 0xc2, 0xd4, 0xcc, 0xb5, 0x1c, 0x45, - 0xaf, 0xa3, 0x87, 0x95, 0x9b, 0x77, 0x50, 0x44, 0x7e, 0xdd, 0xca, 0x3f, - 0x51, 0x21, 0xae, 0xf2, 0x15, 0xa9, 0x32, 0x94, 0xca, 0xde, 0x3b, 0x97, - 0x13, 0x6b, 0xff, 0xe0, 0x79, 0x39, 0x40, 0xf0, 0x66, 0x7d, 0x5e, 0xef, - 0xec, 0x0a, 0x35, 0xd2, 0x0d, 0x09, 0x19, 0x13, 0xf2, 0xc2, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xdc, 0x07, 0x2e, 0x46, - 0xab, 0x4d, 0x6d, 0xf7, 0x24, 0xba, 0x02, 0xe3, 0xc5, 0xe3, 0xed, 0x64, - 0xc6, 0x77, 0x5a, 0x14, 0xae, 0x38, 0x52, 0x8c, 0x16, 0x2c, 0x52, 0x0e, - 0xf6, 0x65, 0x99, 0xcc, 0xf6, 0x9f, 0x77, 0xcc, 0x2e, 0xaf, 0x14, 0xd1, - 0xf0, 0x0f, 0xa7, 0x3e, 0x5b, 0x74, 0xff, 0xb9, 0xd3, 0x30, 0x02, 0x5e, - 0x52, 0xc8, 0x6f, 0x57, 0xef, 0x28, 0xf5, 0xfa, 0x9e, 0x70, 0x00, 0xfc, - 0x3e, 0xc3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0xaa, 0x9f, 0x86, 0xb0, 0x6d, 0xa1, 0x0c, 0xfa, 0xef, 0xb3, 0x6a, 0x50, - 0xa6, 0xfe, 0xff, 0xa9, 0x61, 0x0b, 0x18, 0x72, 0xee, 0xc6, 0xcd, 0x3a, - 0x34, 0x5e, 0xa8, 0x81, 0x31, 0x54, 0x25, 0x05, 0xc1, 0xd9, 0x66, 0x3d, - 0x17, 0xbb, 0x03, 0x21, 0x07, 0x69, 0x3a, 0x37, 0xe8, 0xd4, 0x6a, 0x68, - 0xe1, 0xa3, 0x19, 0x5a, 0x8d, 0x14, 0x11, 0x09, 0xef, 0xae, 0xfe, 0x94, - 0x19, 0x8a, 0xe4, 0xb9, 0x6e, 0xe8, 0xfa, 0x12, 0x2a, 0x5d, 0x00, 0x29, - 0x27, 0x6d, 0x5a, 0xa5, 0x09, 0x34, 0x79, 0x2b, 0xa8, 0xcc, 0x42, 0xb4, - 0xde, 0xe0, 0x91, 0xb9, 0x06, 0x0c, 0x11, 0x17, 0x25, 0x7a, 0x35, 0x57, - 0x51, 0x40, 0xf3, 0xc7, 0xc6, 0x4a, 0x69, 0x98, 0x2b, 0x2b, 0x3e, 0x5d, - 0x32, 0xd8, 0x8f, 0xb0, 0x1d, 0xee, 0x77, 0xe3, 0xaf, 0x4f, 0x71, 0x05, - 0x04, 0xd2, 0xff, 0x51, 0xed, 0xa4, 0x69, 0x50, 0x24, 0x2a, 0xe5, 0xaa, - 0xbb, 0xc6, 0x7a, 0x7f, 0xb2, 0xdf, 0x1d, 0xc2, 0x02, 0x2e, 0x52, 0xd1, - 0xd9, 0x5b, 0xe7, 0x6c, 0x50, 0x31, 0x4e, 0xdf, 0x8e, 0x3f, 0x37, 0xfc, - 0xf5, 0x34, 0x0e, 0xdb, 0x4c, 0x5d, 0x7d, 0xc8, 0xe4, 0x72, 0x40, 0xcb, - 0x95, 0xa5, 0x41, 0xeb, 0x78, 0x5f, 0x64, 0x20, 0x55, 0x19, 0xc7, 0xf9, - 0x9c, 0x71, 0x40, 0x8f, 0xcc, 0x2d, 0x86, 0xc0, 0xf4, 0x36, 0x2b, 0x0e, - 0x28, 0xb4, 0xad, 0x1b, 0xde, 0x60, 0x67, 0x03, 0x0f, 0x7c, 0x18, 0xd9, - 0xc3, 0x73, 0x67, 0x0d, 0x44, 0x3d, 0xbe, 0x7c, 0xcf, 0x96, 0x22, 0x0b, - 0x0e, 0x3a, 0x0b, 0xcf, 0x04, 0x95, 0x92, 0x7d, 0x4b, 0xa2, 0x6a, 0x0b, - 0x47, 
0x72, 0x73, 0xa8, 0x9b, 0x96, 0x3d, 0xc6, 0x03, 0x34, 0xb1, 0x69, - 0xc2, 0x50, 0x60, 0x89, 0x8c, 0x55, 0x8f, 0x8e, 0x74, 0xa8, 0x9e, 0x25, - 0xe4, 0x0e, 0x73, 0xef, 0x4f, 0x51, 0xbe, 0xed, 0x5c, 0x14, 0xd3, 0xfa, - 0x94, 0x58, 0x8d, 0x5c, 0xa0, 0xb1, 0xfc, 0x37, 0x6e, 0x9c, 0x9e, 0x61, - 0xe5, 0x12, 0x13, 0xb2, 0x88, 0xc6, 0xcf, 0x60, 0x3f, 0x0d, 0x51, 0x33, - 0x22, 0xfa, 0xfb, 0x2d, 0x2b, 0x8d, 0x43, 0x9b, 0x3d, 0x1e, 0x88, 0x24, - 0x50, 0x78, 0xf7, 0x7e, 0x45, 0xb1, 0x0f, 0xa9, 0xe6, 0x77, 0xf8, 0x78, - 0xff, 0x57, 0x6a, 0x05, 0x06, 0x0c, 0x7e, 0x1e, 0x7f, 0xe9, 0x90, 0xe8, - 0x61, 0x68, 0xbc, 0x9e, 0xc4, 0xe5, 0x06, 0x04, 0x76, 0xcc, 0x01, 0x57, - 0x1a, 0x55, 0x9e, 0x45, 0x26, 0xd6, 0xd8, 0xc2, 0x50, 0x25, 0xfc, 0x72, - 0x4e, 0x18, 0xbe, 0xf2, 0x2f, 0xc0, 0x1b, 0xc8, 0x14, 0xeb, 0x24, 0xda, - 0x15, 0x0a, 0x83, 0x38, 0xc5, 0xdd, 0xc9, 0xd7, 0x12, 0x35, 0x55, 0xdf, - 0x2c, 0x23, 0xea, 0x17, 0xca, 0xbf, 0x18, 0xc9, 0x80, 0x63, 0x4b, 0x77, - 0x8b, 0x17, 0x01, 0x05, 0x1b, 0xa3, 0x0b, 0x0f, 0xdd, 0xc6, 0xe0, 0xdf, - 0xc9, 0xa6, 0x8c, 0x50, 0x95, 0x8d, 0x6c, 0x96, 0x67, 0xff, 0x88, 0x38, - 0x3b, 0x76, 0x72, 0x11, 0x35, 0xa0, 0x1c, 0xc8, 0x96, 0x9c, 0xe5, 0x90, - 0x79, 0x0e, 0x62, 0x57, 0x00, 0xd9, 0x57, 0xf8, 0xa4, 0xc2, 0xc2, 0x0a, - 0x17, 0x8e, 0xd7, 0x03, 0x6d, 0x4d, 0x14, 0xb6, 0x96, 0x8a, 0x76, 0x67, - 0x58, 0xce, 0x9c, 0xb3, 0x10, 0x49, 0x06, 0xeb, 0x56, 0x43, 0x40, 0xcb, - 0xd4, 0xd7, 0x59, 0x42, 0xa4, 0xd7, 0x21, 0x6a, 0x51, 0x3d, 0x1c, 0x54, - 0xd7, 0xd6, 0xa2, 0xcf, 0xf8, 0xf6, 0x72, 0x35, 0x04, 0xa6, 0xe3, 0x53, - 0xca, 0xc5, 0x62, 0xee, 0xa9, 0xc3, 0x6d, 0x1b, 0xc4, 0xc5, 0xd9, 0xa7, - 0x37, 0xc2, 0x04, 0x01, 0xc9, 0x4a, 0x2e, 0x26, 0xdd, 0x12, 0x6e, 0x41, - 0x64, 0xb4, 0xe8, 0xe8, 0xc7, 0xf8, 0xab, 0x8a, 0xab, 0x1d, 0x7f, 0x2d, - 0x58, 0xc2, 0xc4, 0xf0, 0x5d, 0x11, 0x35, 0x52, 0x88, 0xbc, 0x0f, 0x44, - 0x6e, 0x91, 0x1e, 0x87, 0xb4, 0xb1, 0x91, 0x52, 0x32, 0xe4, 0x38, 0x6d, - 0x5e, 0x8d, 0x30, 0xf0, 0xbc, 0xc3, 0x15, 0x80, 0x47, 0x36, 0x35, 0xb0, - 0x93, 0xf3, 0xc4, 0x82, 0xc7, 0x73, 0xc1, 0x67, 0x0c, 0x7a, 0x31, 0x36, - 0xbc, 0x73, 0x67, 0x66, 0xae, 0x48, 0x82, 0x27, 0x6e, 0x14, 0xd0, 0xd5, - 0x12, 0x10, 0xce, 0x5e, 0x37, 0xcd, 0x7e, 0xa5, 0xcb, 0xff, 0x91, 0xf0, - 0x62, 0xdb, 0x95, 0x74, 0x0c, 0x8c, 0x1e, 0x78, 0x11, 0x02, 0xb3, 0x02, - 0x0b, 0x31, 0xe7, 0x4e, 0x8b, 0x58, 0x6a, 0xde, 0x20, 0x93, 0x8b, 0x8e, - 0x62, 0x03, 0x24, 0xc9, 0xca, 0xf8, 0x44, 0x1d, 0x0c, 0x1b, 0xd8, 0x5d, - 0xcc, 0xe2, 0x8e, 0x02, 0xc6, 0x5c, 0x06, 0x45, 0xe6, 0x94, 0x8f, 0xa2, - 0x3e, 0xf5, 0xe9, 0xf5, 0x88, 0x87, 0xb2, 0x84, 0x1e, 0xb6, 0xb6, 0xfc, - 0x9f, 0x8e, 0x79, 0xf5, 0x4b, 0x24, 0x81, 0x3e, 0x5d, 0xf4, 0x10, 0x6e, - 0xdd, 0x8c, 0x8c, 0xae, 0xc6, 0x2c, 0x26, 0xb2, 0xfc, 0xf3, 0x99, 0xe8, - 0x8c, 0x65, 0x5d, 0x6c, 0xa8, 0x1d, 0x6f, 0x1e, 0x32, 0x0a, 0xee, 0x87, - 0xf6, 0xe1, 0xdd, 0x5e, 0x7f, 0x7a, 0x90, 0x8c, 0x3f, 0xe8, 0x47, 0x95, - 0x9b, 0xc8, 0x2c, 0x49, 0xc9, 0xe4, 0x2d, 0xea, 0x58, 0xfc, 0x29, 0x1a, - 0xb7, 0xa1, 0xf9, 0xb8, 0x84, 0x41, 0xa0, 0xf1, 0x77, 0x83, 0x56, 0x73, - 0x86, 0xea, 0xf4, 0xf5, 0x2a, 0xa6, 0x6b, 0x00, 0x64, 0x39, 0x08, 0x8f, - 0xf0, 0x22, 0x1a, 0x4c, 0xf2, 0x5a, 0xd0, 0xaa, 0x39, 0xae, 0x8a, 0xbc, - 0x03, 0x99, 0xf7, 0xcc, 0x80, 0xdf, 0x2b, 0x85, 0xbe, 0x1a, 0x97, 0x28, - 0x63, 0x04, 0x72, 0x75, 0x75, 0xb4, 0x9c, 0xd3, 0x17, 0xcc, 0x1e, 0xa1, - 0xd2, 0x47, 0x18, 0x45, 0xad, 0xb4, 0x0a, 0x32, 0x31, 0x36, 0x64, 0x48, - 0x3f, 0x7b, 0x4b, 0xc0, 0xd6, 0x78, 0x46, 0xaa, 0x90, 0x89, 0xf9, 0x36, - 0x3d, 0xb4, 0xb3, 0x50, 0x51, 0xd9, 0x55, 0x6f, 0xa9, 0xe7, 0x25, 0xaf, - 0xa0, 
0xca, 0x9d, 0x45, 0x83, 0xc3, 0x0b, 0x2a, 0x0c, 0xf9, 0x3f, 0xe4, - 0x08, 0xf4, 0xbd, 0x23, 0x45, 0x85, 0xcf, 0x41, 0x93, 0xd3, 0x21, 0x5f, - 0x53, 0xa2, 0x5b, 0xa9, 0xf5, 0xe9, 0x8f, 0x2a, 0x2d, 0x53, 0x3c, 0x36, - 0x17, 0xce, 0x37, 0x35, 0x3e, 0x9e, 0x6b, 0xbc, 0xba, 0xaa, 0xa5, 0x61, - 0x79, 0x98, 0x8e, 0xbd, 0x19, 0xf4, 0x5f, 0xa9, 0xb8, 0x96, 0xa2, 0xce, - 0x32, 0x00, 0xab, 0x51, 0xcb, 0xfa, 0x30, 0x3a, 0x83, 0x92, 0x91, 0xad, - 0x08, 0x61, 0x62, 0x51, 0x7f, 0x19, 0xa9, 0x2a, 0x84, 0xf2, 0xab, 0x7e, - 0x5e, 0xa7, 0x5a, 0x54, 0x7f, 0x68, 0x2a, 0x7b, 0x4f, 0xde, 0x45, 0x1d, - 0xef, 0x73, 0x5f, 0xc0, 0x40, 0x6e, 0xec, 0x6c, 0xe9, 0xa5, 0x6b, 0x46, - 0x54, 0x7c, 0x24, 0x8b, 0xa4, 0xe5, 0xb4, 0x82, 0x31, 0x1f, 0x3e, 0x79, - 0x2e, 0x21, 0x8c, 0xf1, 0xbd, 0xad, 0x7c, 0x28, 0xcc, 0xbd, 0x58, 0x72, - 0xe9, 0x6a, 0x04, 0x56, 0x67, 0x0f, 0x62, 0x98, 0x5a, 0x97, 0x4b, 0xe2, - 0x67, 0x70, 0xbb, 0x17, 0xb1, 0x84, 0x5b, 0xd4, 0x6e, 0xab, 0x90, 0x29, - 0x20, 0x93, 0x34, 0xa8, 0x03, 0x0f, 0xed, 0x1a, 0xf0, 0x1b, 0x92, 0x87, - 0x43, 0xa5, 0x6a, 0x1c, 0xdc, 0xd7, 0x22, 0x68, 0x83, 0x98, 0x74, 0x2a, - 0x4c, 0x51, 0xef, 0x71, 0x19, 0xd5, 0x3d, 0x05, 0x19, 0x61, 0xb2, 0x52, - 0xa8, 0x6e, 0xda, 0x72, 0x51, 0x66, 0x9f, 0xf0, 0x12, 0xf6, 0x18, 0x60, - 0xcc, 0xd7, 0x2f, 0x2e, 0x83, 0x14, 0x09, 0xdb, 0x55, 0x1c, 0xf2, 0xaf, - 0xfd, 0xa4, 0x40, 0xf1, 0x4a, 0xc7, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, 0x9c, 0x52, 0xff, 0x48, 0x06, 0x61, 0x76, 0x6d, - 0xd7, 0x44, 0xb1, 0x0c, 0x32, 0x62, 0x15, 0xa1, 0xc3, 0x97, 0x03, 0xdd, - 0xed, 0x20, 0x3c, 0x3a, 0x09, 0x16, 0xe5, 0x7d, 0x8c, 0xf9, 0x7b, 0x22, - 0x5e, 0x3a, 0xdd, 0xf0, 0xc6, 0xf0, 0x3a, 0xd4, 0x94, 0x85, 0x1c, 0x60, - 0x74, 0x91, 0xa3, 0xe2, 0x8a, 0xe5, 0x3e, 0xd4, 0x95, 0x28, 0x8b, 0x1a, - 0x7b, 0xbe, 0x07, 0xc0, 0xe3, 0x6b, 0xb9, 0x85, 0x82, 0x0b, 0x24, 0xba, - 0x1c, 0xfc, 0xc0, 0x0a, 0x21, 0x33, 0xad, 0x00, 0x19, 0xce, 0xb5, 0x8f, - 0x73, 0x05, 0xf1, 0xac, 0x03, 0xbe, 0x1f, 0x22, 0xd5, 0x32, 0x5e, 0x50, - 0xe3, 0xe0, 0x62, 0x26, 0xf4, 0xb0, 0x85, 0xd8, 0xf7, 0xa7, 0xf4, 0xa7, - 0xff, 0x10, 0xb8, 0xbc, 0xe0, 0x3e, 0x4d, 0xcb, 0x37, 0x74, 0xcc, 0x85, - 0xed, 0xa0, 0x34, 0x6c, 0xfa, 0x37, 0x84, 0x6a, 0x94, 0x55, 0x3b, 0x1e, - 0x14, 0xab, 0x26, 0x7b, 0x3e, 0xac, 0xc3, 0x79, 0xcd, 0x1b, 0x00, 0x02, - 0xb3, 0x01, 0xc3, 0x10, 0xdd, 0x56, 0x7d, 0x0e, 0x69, 0x39, 0x3c, 0x17, - 0xa3, 0xae, 0x9c, 0x2d, 0xc7, 0x5a, 0x0b, 0x7c, 0xd0, 0xac, 0xa1, 0x91, - 0x6a, 0x6d, 0xc0, 0x3f, 0x98, 0xf1, 0x21, 0xf5, 0xa5, 0x7c, 0xbc, 0x70, - 0x0d, 0x7b, 0x2f, 0x0d, 0x5a, 0xa5, 0x4a, 0x5a, 0xff, 0x51, 0xbf, 0x7f, - 0xb5, 0x4f, 0x2c, 0xba, 0xa9, 0x46, 0x81, 0x6b, 0xac, 0xc6, 0x62, 0x2d, - 0xd7, 0xb5, 0x04, 0x5f, 0xd4, 0x5f, 0x1f, 0x6b, 0x11, 0x7d, 0xe3, 0x58, - 0x1f, 0xb5, 0xbf, 0x16, 0x43, 0x88, 0x05, 0xf5, 0xa4, 0x7b, 0xb5, 0x0e, - 0xf4, 0x01, 0xb6, 0x90, 0x69, 0x52, 0x0a, 0x5e, 0x9b, 0x87, 0x51, 0x5e, - 0xd5, 0xed, 0x2c, 0xcc, 0x58, 0xad, 0xe6, 0x77, 0xa2, 0xc5, 0x7c, 0x1e, - 0xc5, 0x92, 0xbe, 0xed, 0x3a, 0x9a, 0x97, 0xed, 0x56, 0xc8, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x16, 0xe8, 0x24, 0xe3, - 0x82, 0x36, 0x8e, 0x50, 0x45, 0xbe, 0xc6, 0x10, 0x02, 0xb9, 0x6d, 0xf9, - 0xed, 0x8f, 0x64, 0x35, 0x4d, 0x2c, 0x9f, 0x99, 0xdc, 0xee, 0xfa, 0x63, - 0x99, 0xc4, 0xb8, 0x3d, 0x77, 0xea, 0xda, 0xd5, 0x95, 0x8b, 0x8e, 0x76, - 0x02, 0x9c, 0x62, 0xa0, 0xad, 0xfe, 0x80, 0x61, 0x72, 0x59, 0xd6, 0x9f, - 0x16, 0x2e, 0x09, 0x71, 0xb8, 0xd7, 0x65, 0x25, 0xc2, 0x5b, 0x40, 0x67, - 0x8e, 0xd6, 0xf8, 0xdf, 0x67, 0x29, 0x19, 0xa2, 0xa6, 0x07, 0xf3, 0xc8, - 0x91, 
0x7d, 0xf2, 0x50, 0x71, 0xba, 0x5c, 0x2d, 0xa7, 0xae, 0xc4, 0xd5, - 0xeb, 0xb9, 0x0d, 0x2d, 0x23, 0xe5, 0x8c, 0x65, 0xf5, 0xf8, 0x97, 0x69, - 0xde, 0x25, 0x6f, 0xea, 0x12, 0x72, 0x3e, 0xb9, 0xa7, 0x8d, 0xcf, 0xa5, - 0x66, 0xee, 0x4e, 0x2e, 0x66, 0x6b, 0xec, 0x77, 0x7f, 0x53, 0xdc, 0x29, - 0x73, 0x5e, 0xe9, 0x2f, 0x79, 0xac, 0x8d, 0x0f, 0x44, 0x09, 0x5d, 0x25, - 0x1d, 0x78, 0xb6, 0xe9, 0xd0, 0xfa, 0x8f, 0x5f, 0x9c, 0xf0, 0xe0, 0xfc, - 0x62, 0x9f, 0x52, 0x6b, 0x5b, 0x8e, 0x3f, 0xdf, 0xb4, 0xf1, 0xdf, 0x35, - 0xd0, 0x8f, 0x5a, 0xc9, 0x1f, 0x08, 0x86, 0xaa, 0x5a, 0x9e, 0xe8, 0xb0, - 0xaa, 0xd4, 0xcd, 0x2a, 0x5b, 0x4f, 0x7f, 0x39, 0x9f, 0x7f, 0x21, 0xf2, - 0xfd, 0x05, 0x96, 0x53, 0x09, 0xfd, 0x36, 0x4c, 0xcd, 0x98, 0x74, 0xf5, - 0xbd, 0xcd, 0x9e, 0x14, 0x15, 0x05, 0xb9, 0x3d, 0x5f, 0x8a, 0x02, 0x86, - 0x10, 0xd7, 0xd4, 0x01, 0x20, 0xd9, 0x8c, 0x65, 0x7d, 0x9d, 0x39, 0x25, - 0xbc, 0xce, 0x1a, 0xb1, 0x76, 0x92, 0xc3, 0x03, 0xed, 0xa2, 0x41, 0x31, - 0x0d, 0xc0, 0x40, 0x94, 0x01, 0xbc, 0x9b, 0xe9, 0x5e, 0x3e, 0x8c, 0x49, - 0xf6, 0x98, 0x0c, 0x39, 0x79, 0xdc, 0xd1, 0x1b, 0xc5, 0xb2, 0x20, 0xb4, - 0x6c, 0xb4, 0x4f, 0xce, 0xf4, 0x6c, 0x0b, 0xef, 0x85, 0xf2, 0x7d, 0x9a, - 0x90, 0x58, 0x1b, 0x51, 0x56, 0x52, 0xac, 0x75, 0x9f, 0x17, 0xe6, 0x48, - 0xaf, 0x18, 0x4c, 0xd8, 0x67, 0xe8, 0xd2, 0x61, 0xbc, 0xa0, 0x95, 0xc9, - 0x78, 0xd8, 0xa2, 0x1d, 0x47, 0x59, 0x30, 0xcf, 0xf3, 0x79, 0x06, 0xd4, - 0x25, 0xf8, 0x9c, 0x5c, 0x28, 0xee, 0xb0, 0xd2, 0xb6, 0xaf, 0x34, 0x0e, - 0xe5, 0xe4, 0x16, 0x2e, 0x05, 0x45, 0x23, 0xc1, 0x88, 0x90, 0x4a, 0x8f, - 0xff, 0xfb, 0xe2, 0xc0, 0xb7, 0xae, 0xb5, 0x50, 0xc9, 0x26, 0xf0, 0xa2, - 0xf5, 0x21, 0x23, 0x79, 0x23, 0xb6, 0x8f, 0x57, 0x64, 0xd1, 0x27, 0xc2, - 0x07, 0x63, 0xa6, 0x54, 0x1f, 0x2f, 0xca, 0x16, 0xb8, 0x28, 0x51, 0x2a, - 0x92, 0xe0, 0x06, 0x36, 0x55, 0x00, 0x6c, 0x99, 0x31, 0xa7, 0x56, 0xb3, - 0x7b, 0x15, 0xcd, 0xc1, 0x32, 0x3a, 0xc0, 0x37, 0x1f, 0xea, 0x29, 0xb6, - 0x75, 0xdf, 0x8a, 0x17, 0x09, 0x45, 0xc2, 0x6e, 0xe2, 0x4c, 0xa5, 0x93, - 0x9b, 0x17, 0x08, 0x27, 0x75, 0x33, 0xdb, 0x1f, 0xab, 0x37, 0xad, 0x8e, - 0xaa, 0xef, 0x0b, 0x82, 0xaa, 0xa7, 0xae, 0x2c, 0x43, 0x4d, 0x8f, 0xa0, - 0x43, 0xd7, 0xa1, 0x34, 0xeb, 0xc0, 0x4e, 0xbd, 0x64, 0xfc, 0xc8, 0x6a, - 0x56, 0xa8, 0xfc, 0x9e, 0x2d, 0x5f, 0x7a, 0xa3, 0x72, 0x06, 0x79, 0x38, - 0x33, 0x05, 0xa7, 0xf0, 0x09, 0x48, 0x55, 0xfe, 0x3f, 0xab, 0x25, 0x8e, - 0x76, 0x1d, 0x12, 0x5a, 0x20, 0x68, 0xfb, 0x51, 0x51, 0x33, 0x40, 0x37, - 0x0c, 0x90, 0x98, 0x6f, 0x66, 0x3f, 0x40, 0xa2, 0x2e, 0x3c, 0xd1, 0x22, - 0x51, 0x54, 0x25, 0x7e, 0x4c, 0x5d, 0x96, 0xb2, 0x65, 0x0f, 0xa3, 0xdf, - 0x8e, 0x97, 0xfe, 0xeb, 0xe7, 0xc6, 0x22, 0x2a, 0x47, 0x3a, 0x78, 0x1b, - 0x39, 0x2e, 0xd6, 0xbc, 0x35, 0xb4, 0xf4, 0xc3, 0xf2, 0x6a, 0x12, 0xc9, - 0xe7, 0x6c, 0x9a, 0xfc, 0xed, 0xbc, 0x11, 0xc7, 0x71, 0x09, 0x8f, 0x56, - 0xc1, 0xd8, 0xb6, 0x92, 0x35, 0x97, 0x8e, 0x71, 0xd2, 0xbb, 0xb4, 0xed, - 0xf0, 0x7e, 0xff, 0x58, 0xd9, 0x95, 0x26, 0xea, 0xa9, 0x4d, 0x38, 0x8d, - 0x4e, 0x8e, 0x53, 0xae, 0x7e, 0xe6, 0xe6, 0x82, 0x35, 0x96, 0xab, 0x0f, - 0x04, 0x0f, 0xf2, 0xac, 0x1b, 0xcd, 0x07, 0x17, 0x1b, 0x25, 0x2f, 0x92, - 0xaf, 0x19, 0xa2, 0x1b, 0xa0, 0x7a, 0xc7, 0x4f, 0xb8, 0x1b, 0x89, 0x21, - 0xb5, 0xe2, 0x24, 0xe9, 0x78, 0xae, 0x7d, 0xd7, 0xcc, 0x8e, 0x3f, 0xa7, - 0xe9, 0xbe, 0xe6, 0x79, 0x0f, 0xdf, 0x86, 0xe9, 0xb9, 0xcd, 0x82, 0x7b, - 0xf5, 0x04, 0x89, 0xa0, 0x73, 0x5d, 0xa2, 0x4e, 0xd6, 0xa0, 0x60, 0x21, - 0xe2, 0xfe, 0xd3, 0xf4, 0x19, 0x8b, 0x6a, 0x03, 0x12, 0x9c, 0x51, 0x9a, - 0x41, 0x4e, 0xf6, 0xb4, 0x6e, 0x0c, 0x43, 0xf5, 0x00, 0x00, 0x78, 0x12, - 0xdd, 
0x21, 0xa8, 0xc7, 0x21, 0xa1, 0x4e, 0x44, 0x10, 0xd0, 0xdb, 0x6f, - 0x0b, 0x4c, 0xe7, 0x7a, 0x8c, 0x0c, 0xaa, 0xb6, 0x9a, 0x7d, 0xa9, 0xff, - 0x5a, 0x2e, 0x15, 0x9e, 0x6f, 0xea, 0xe1, 0x42, 0x0c, 0x9c, 0x5a, 0x3b, - 0xd5, 0xe6, 0xde, 0x23, 0x3f, 0x9c, 0x45, 0x20, 0x67, 0x96, 0x50, 0x16, - 0x80, 0x42, 0xe7, 0x67, 0x7d, 0x24, 0xdc, 0x00, 0xaa, 0x01, 0x8a, 0xa3, - 0x61, 0xfe, 0x9a, 0xce, 0xc1, 0xe5, 0x2e, 0x19, 0x85, 0x04, 0xe6, 0x7b, - 0xe8, 0x7a, 0xbc, 0x9d, 0xfe, 0x71, 0x29, 0x1d, 0x17, 0xae, 0x6b, 0x1a, - 0x64, 0xd7, 0xfe, 0x18, 0x29, 0x07, 0x9b, 0x49, 0x43, 0xba, 0x29, 0x37, - 0xa8, 0xb0, 0x26, 0x27, 0x6b, 0x7d, 0xde, 0x49, 0x12, 0x90, 0x05, 0xe2, - 0x2c, 0xd8, 0x08, 0xd0, 0x5d, 0x74, 0xa7, 0x15, 0xbe, 0x34, 0x34, 0x6d, - 0xad, 0xfb, 0xa8, 0x01, 0x4a, 0x6c, 0x98, 0xba, 0x84, 0x38, 0xbd, 0x05, - 0xe8, 0x87, 0x27, 0x91, 0x3f, 0xb8, 0xe9, 0x06, 0x27, 0xda, 0x56, 0x07, - 0xaa, 0xea, 0xf4, 0x80, 0x5c, 0x12, 0x44, 0xbe, 0x23, 0xb3, 0x63, 0x9f, - 0x5f, 0x37, 0xa7, 0x53, 0x4c, 0xfc, 0x4d, 0x87, 0xeb, 0x91, 0xe8, 0xd7, - 0x5a, 0xd6, 0xca, 0x67, 0x2d, 0x2f, 0x5a, 0x0e, 0xc7, 0x82, 0x78, 0xa4, - 0xf3, 0x56, 0x07, 0xa5, 0xab, 0x6d, 0x09, 0xd2, 0x0d, 0x08, 0x6b, 0x6e, - 0x1f, 0xc1, 0xf2, 0x91, 0x1a, 0x39, 0xfe, 0x14, 0x56, 0x3f, 0xeb, 0x9f, - 0x14, 0xc2, 0xb3, 0xb2, 0xc2, 0x8d, 0xc2, 0xee, 0x7e, 0xf0, 0x7d, 0x92, - 0xd2, 0xc3, 0x57, 0x3e, 0x2c, 0x07, 0x1b, 0x6a, 0x9b, 0x3b, 0x79, 0x59, - 0xc9, 0x22, 0x96, 0x6c, 0x3e, 0x37, 0xd3, 0x0e, 0x5c, 0xf6, 0x8f, 0xa9, - 0xaa, 0xc9, 0xa4, 0x4b, 0xaf, 0x5d, 0x1a, 0xb6, 0xf3, 0x91, 0x32, 0x4f, - 0xca, 0x72, 0xa0, 0x42, 0x01, 0x51, 0xaf, 0x19, 0x89, 0xc4, 0xcc, 0x9b, - 0xf3, 0x52, 0xe9, 0xa6, 0xf2, 0x71, 0x6f, 0x5a, 0x38, 0x02, 0xb8, 0x75, - 0x88, 0x5f, 0x8d, 0x12, 0xc5, 0x55, 0x4f, 0xd1, 0xba, 0xf2, 0x24, 0xdc, - 0x63, 0x5f, 0x93, 0xc7, 0xf3, 0xe7, 0x59, 0xac, 0xc3, 0xed, 0xbc, 0x02, - 0xe3, 0xad, 0xb2, 0x8e, 0x2c, 0x2d, 0x47, 0xb4, 0x34, 0x8d, 0xae, 0x44, - 0xc8, 0x5f, 0x14, 0xe8, 0x8e, 0x7b, 0xc3, 0x60, 0x53, 0x9a, 0x51, 0xea, - 0x7f, 0x2f, 0xb6, 0x62, 0x61, 0xf7, 0xc0, 0x18, 0x0f, 0x20, 0x79, 0x13, - 0x5c, 0xe8, 0xca, 0x04, 0x29, 0x5f, 0x70, 0x4d, 0x88, 0xa2, 0x43, 0x20, - 0x57, 0x33, 0x04, 0x74, 0x8e, 0x7c, 0x89, 0xd4, 0x56, 0x8f, 0x93, 0x86, - 0x81, 0x6c, 0x11, 0xfc, 0x32, 0x0e, 0xb0, 0x3e, 0xe5, 0x13, 0xbf, 0x76, - 0x62, 0xcc, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x0e, 0xf8, 0x8f, 0xde, 0xfd, 0xfd, 0xcf, 0xd1, 0x6f, 0x9f, 0xf2, 0xb6, - 0xb6, 0x59, 0xb2, 0x73, 0x1c, 0x3c, 0x0d, 0xb0, 0x4d, 0xb8, 0x96, 0xc6, - 0xeb, 0xe5, 0xf8, 0x0d, 0x3e, 0xd7, 0x0c, 0xbd, 0x9c, 0xaa, 0xd5, 0x1c, - 0x19, 0x9a, 0x4c, 0x8e, 0xfa, 0xac, 0x68, 0x74, 0x16, 0x06, 0xb5, 0x49, - 0xe7, 0xd5, 0x6f, 0x4f, 0xcc, 0xd9, 0x02, 0x74, 0xd6, 0x08, 0x73, 0x7c, - 0xa9, 0xfa, 0x3e, 0x50, 0x87, 0xf7, 0xfb, 0xa6, 0x94, 0xdc, 0xb1, 0x40, - 0xec, 0xa7, 0xa9, 0x39, 0xff, 0x40, 0x4a, 0x97, 0x9b, 0xcc, 0x57, 0x66, - 0x68, 0xd6, 0xa8, 0x4d, 0x13, 0x06, 0x0e, 0x03, 0xc4, 0xdf, 0x7a, 0xe4, - 0x2f, 0x0e, 0xd7, 0x54, 0xe0, 0xbd, 0x93, 0xeb, 0x82, 0xd8, 0x05, 0x2d, - 0xa2, 0xf0, 0x4e, 0xd0, 0xf9, 0x3e, 0x3e, 0x6b, 0x3d, 0x08, 0x39, 0x4e, - 0x35, 0x13, 0x7b, 0x3b, 0x39, 0x2c, 0x47, 0x2c, 0x61, 0x9f, 0xfd, 0x59, - 0x88, 0x5f, 0x65, 0x08, 0xa9, 0x66, 0xec, 0xb5, 0x21, 0xf3, 0xe9, 0xba, - 0x11, 0x63, 0x24, 0x6c, 0xf4, 0x50, 0x3a, 0xe5, 0x0c, 0x06, 0x39, 0x69, - 0x2f, 0xca, 0x0f, 0x48, 0xbe, 0x95, 0x7d, 0x13, 0x3d, 0xa5, 0x75, 0x69, - 0x85, 0xc8, 0xb3, 0x72, 0x72, 0x3c, 0x4f, 0x96, 0xe7, 0xb7, 0xbd, 0xe7, - 0x76, 0xba, 0xac, 0xc0, 0x07, 0x4d, 0xc1, 0xed, 0xb9, 0xf0, 0x91, 0x2e, - 0x36, 
0xb7, 0x5b, 0x1c, 0xb7, 0xd6, 0xb3, 0x45, 0x7d, 0x0a, 0xf5, 0x43, - 0xdd, 0x7a, 0x8b, 0x4e, 0x18, 0xf2, 0xf3, 0x19, 0xcd, 0x4a, 0xda, 0x3c, - 0x1b, 0x05, 0x27, 0x67, 0x43, 0xa9, 0x8e, 0xe7, 0x4a, 0x95, 0xa9, 0xad, - 0x6c, 0x8c, 0xb2, 0x2e, 0x12, 0xcb, 0xf3, 0xeb, 0x65, 0x26, 0xf4, 0x3e, - 0x86, 0xee, 0x7e, 0xd9, 0xba, 0xce, 0x8d, 0x15, 0x3e, 0xa8, 0x40, 0x59, - 0x1d, 0x27, 0x78, 0x75, 0xf0, 0xf9, 0x33, 0xb5, 0x32, 0xa9, 0x66, 0xe6, - 0x2e, 0x2e, 0x3d, 0xf5, 0x4a, 0xf0, 0x97, 0x2d, 0xe7, 0x43, 0x85, 0x43, - 0x61, 0x25, 0x15, 0x13, 0x9e, 0x8e, 0xf6, 0x78, 0xe8, 0x67, 0xba, 0xc2, - 0x6d, 0xda, 0x46, 0x25, 0x76, 0xd9, 0x9b, 0x69, 0x95, 0x4b, 0x50, 0x8c, - 0xb7, 0x36, 0x49, 0xbc, 0xd7, 0x39, 0x69, 0xb9, 0xc1, 0x5f, 0x5f, 0xcc, - 0x83, 0x4c, 0x16, 0xb8, 0x0c, 0x85, 0xf1, 0xa4, 0x57, 0x6c, 0x22, 0x1f, - 0x60, 0x0c, 0xff, 0xb6, 0xc9, 0xf7, 0x21, 0x2d, 0x35, 0x78, 0x31, 0x79, - 0xd0, 0x6d, 0x61, 0xec, 0x61, 0x04, 0x75, 0x5c, 0x06, 0xc3, 0x53, 0x1b, - 0xb5, 0xdc, 0x23, 0xb9, 0xd9, 0x07, 0xd1, 0xd0, 0xb3, 0xa5, 0xab, 0xd9, - 0xbe, 0xb7, 0xdc, 0xae, 0x3f, 0x3e, 0xd7, 0x2a, 0x79, 0x3f, 0x9c, 0x27, - 0x81, 0x8d, 0x61, 0xe8, 0x46, 0x8f, 0x05, 0xf4, 0x9c, 0x30, 0x35, 0x9a, - 0x2f, 0x62, 0x84, 0x7c, 0xa5, 0x95, 0x68, 0x34, 0xe6, 0xf0, 0xb9, 0x42, - 0xd4, 0x37, 0xc6, 0xd2, 0x35, 0x1f, 0x7b, 0xe0, 0xa6, 0x92, 0xcf, 0xf7, - 0x0f, 0x08, 0x10, 0x79, 0xbd, 0xa8, 0x7c, 0x4e, 0xef, 0xf1, 0x01, 0x8d, - 0x1b, 0x0c, 0x98, 0x46, 0x28, 0xdc, 0xd5, 0xa8, 0xcf, 0x67, 0x7d, 0x87, - 0x2a, 0x8f, 0xdd, 0x52, 0x43, 0x5a, 0x55, 0x80, 0x88, 0xa6, 0xcd, 0x9c, - 0x5d, 0x36, 0xae, 0xef, 0x61, 0x43, 0xec, 0xf0, 0x7f, 0x92, 0x21, 0x1f, - 0xa2, 0xa3, 0x76, 0x0e, 0x5d, 0xf3, 0xa7, 0xe7, 0x7d, 0xb0, 0x2c, 0x94, - 0x36, 0x95, 0x34, 0x4e, 0x04, 0xfb, 0x51, 0xf9, 0xe6, 0x7e, 0x56, 0x7a, - 0x59, 0xce, 0x0a, 0x45, 0x7e, 0xeb, 0xc4, 0xbc, 0xfd, 0x20, 0xaa, 0x34, - 0x6b, 0xee, 0x3b, 0x09, 0xe8, 0x00, 0x4b, 0xfc, 0x68, 0x24, 0x43, 0xdb, - 0x09, 0x58, 0xd0, 0xb6, 0xbf, 0xaf, 0x1d, 0x7f, 0x8a, 0x4c, 0x9e, 0x51, - 0x97, 0x97, 0xe1, 0x0c, 0x0d, 0xaf, 0xd1, 0x1e, 0x62, 0xad, 0x70, 0xa5, - 0x8a, 0x24, 0x2f, 0x4a, 0xa6, 0x55, 0xb1, 0x44, 0x09, 0x88, 0xab, 0xa5, - 0x45, 0x28, 0xa0, 0x34, 0x9e, 0x14, 0x2c, 0xf9, 0x0f, 0xb8, 0x33, 0x8f, - 0xcc, 0xba, 0x50, 0x34, 0x4c, 0x96, 0x89, 0x09, 0xb9, 0xa8, 0xfb, 0xac, - 0x59, 0x73, 0xea, 0x61, 0xbc, 0x0d, 0x24, 0x3a, 0x20, 0xc2, 0x76, 0xfc, - 0x2e, 0xce, 0xfb, 0x75, 0x00, 0xca, 0x58, 0xbd, 0xab, 0x61, 0x9b, 0x13, - 0x2b, 0xa3, 0xf6, 0x15, 0x55, 0x83, 0x23, 0xc4, 0xf3, 0x4c, 0x89, 0xc5, - 0x4a, 0x18, 0x5c, 0x8d, 0x41, 0xcc, 0x06, 0x7b, 0xe3, 0x2a, 0x1f, 0x6a, - 0x57, 0xbc, 0x54, 0x61, 0x0c, 0xf2, 0xec, 0xbf, 0xb0, 0xf0, 0x21, 0xde, - 0xfc, 0xe4, 0xef, 0xce, 0x47, 0xc8, 0xdc, 0x11, 0xc7, 0x8a, 0x12, 0x97, - 0x68, 0x1d, 0x9e, 0x9a, 0xbf, 0xad, 0x62, 0x7e, 0x4b, 0x88, 0xd7, 0x20, - 0x22, 0xce, 0x5e, 0xe3, 0x87, 0x12, 0xa3, 0x05, 0xef, 0x1f, 0x05, 0xb1, - 0xbd, 0x1b, 0x80, 0x43, 0x84, 0x33, 0x8b, 0x87, 0xa5, 0xc2, 0xe1, 0x49, - 0xa8, 0x75, 0x49, 0x9b, 0x1b, 0x64, 0x8a, 0xd0, 0x86, 0x10, 0xa8, 0x72, - 0xeb, 0x2e, 0xe7, 0x3f, 0xaa, 0x6b, 0x4a, 0x22, 0xae, 0x17, 0x8f, 0x10, - 0x22, 0x03, 0x66, 0x67, 0x35, 0x40, 0x29, 0x1e, 0xf2, 0x05, 0x36, 0xd5, - 0xed, 0xe2, 0x2a, 0xcc, 0x77, 0xe2, 0x16, 0xef, 0xa7, 0x9b, 0xe1, 0x1b, - 0xba, 0xf3, 0xf5, 0x74, 0x6c, 0x2a, 0x98, 0x8a, 0x14, 0xaf, 0x2c, 0xab, - 0xfb, 0x51, 0x53, 0x75, 0x17, 0xcb, 0x5c, 0x86, 0xb5, 0x60, 0x70, 0x29, - 0x65, 0x69, 0x49, 0x42, 0x4f, 0x42, 0x6b, 0xc7, 0xdb, 0x98, 0x7d, 0x1e, - 0xf8, 0x45, 0xb2, 0x33, 0xd6, 0x34, 0x26, 0xa6, 0x7f, 0x76, 0x31, 0x13, - 0x13, 
0x9d, 0xd2, 0xb0, 0x30, 0x0b, 0x0b, 0x3e, 0x1a, 0x84, 0xb0, 0xbd, - 0x81, 0x34, 0x25, 0x73, 0x99, 0x87, 0x1a, 0xc8, 0x44, 0x34, 0x9d, 0x1a, - 0x3d, 0x76, 0x44, 0x1d, 0xe2, 0x22, 0xad, 0x3d, 0xb2, 0xa3, 0x1c, 0xd5, - 0x27, 0x8c, 0xc6, 0x84, 0xdf, 0x33, 0xbe, 0xb2, 0xa7, 0xb9, 0xc5, 0x6e, - 0x48, 0xdc, 0xe9, 0xf8, 0xef, 0xfc, 0xaa, 0x1f, 0x5e, 0x41, 0x48, 0x1e, - 0xe0, 0xb9, 0xd6, 0x6e, 0x7a, 0x9c, 0xa3, 0x98, 0x4b, 0xfa, 0x90, 0xa4, - 0x58, 0x33, 0x85, 0x3b, 0x11, 0x44, 0x83, 0x4b, 0x1e, 0x0e, 0x5d, 0x11, - 0x36, 0x15, 0xe1, 0xbf, 0x15, 0x04, 0x8e, 0x88, 0xc6, 0x18, 0x53, 0xc3, - 0x8d, 0x28, 0x86, 0x25, 0xef, 0x55, 0x7b, 0xf6, 0x85, 0xf8, 0xed, 0x3b, - 0xcf, 0x5d, 0xa6, 0xc7, 0x66, 0xb7, 0xbe, 0x14, 0xf0, 0x62, 0x89, 0x1f, - 0x32, 0x1e, 0x86, 0x2a, 0x93, 0xd5, 0xca, 0x37, 0x03, 0x0b, 0xf8, 0x0f, - 0xca, 0x50, 0x6c, 0x16, 0x2b, 0xf0, 0x77, 0xca, 0xbb, 0x8e, 0x95, 0x11, - 0xef, 0x5b, 0xbe, 0x2f, 0x62, 0x50, 0xb8, 0x3d, 0xff, 0xfa, 0x30, 0x21, - 0xb2, 0x86, 0x3f, 0x50, 0x57, 0x98, 0x79, 0x15, 0xce, 0x3e, 0xbf, 0x49, - 0x58, 0xb0, 0xb5, 0xd7, 0xbe, 0x01, 0x55, 0xee, 0x60, 0x14, 0x9d, 0x5b, - 0x57, 0x48, 0x05, 0x72, 0x6a, 0x23, 0x29, 0xeb, 0xf3, 0x36, 0x2a, 0xc1, - 0xda, 0x5e, 0x4a, 0x63, 0xc4, 0x6b, 0x04, 0xe8, 0xe8, 0xc1, 0xb5, 0xc4, - 0x2d, 0x60, 0x1f, 0xa0, 0x2b, 0x33, 0xa5, 0xb7, 0x82, 0x59, 0x21, 0xba, - 0x13, 0xda, 0x79, 0xda, 0x5a, 0xb1, 0x82, 0x5b, 0x52, 0x7f, 0x0c, 0x70, - 0x75, 0x65, 0xe0, 0x44, 0xb3, 0xca, 0xd0, 0x09, 0x38, 0x24, 0x83, 0x8e, - 0x0c, 0x4c, 0xef, 0x96, 0xe4, 0x04, 0x30, 0x46, 0x23, 0x6a, 0x28, 0x13, - 0x1d, 0x37, 0x14, 0x75, 0x6e, 0xd0, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x21, 0xa2, 0xf0, 0x7d, 0x29, 0x8f, 0x62, 0x2e, - 0xf4, 0x0e, 0x14, 0x9b, 0x60, 0x38, 0xc0, 0x95, 0xfb, 0x3c, 0x90, 0x5a, - 0xa0, 0x1f, 0x30, 0x09, 0xfc, 0x6d, 0xa9, 0xd1, 0x7b, 0x0b, 0x7c, 0x78, - 0xf9, 0xf6, 0xa8, 0x5e, 0xa6, 0x7a, 0xf6, 0x1c, 0xab, 0x1b, 0x0e, 0xa9, - 0x08, 0xfd, 0xd9, 0x97, 0x08, 0x24, 0x2b, 0xda, 0x08, 0x8b, 0x0c, 0x07, - 0x70, 0x15, 0xa8, 0x0c, 0x86, 0xfc, 0xd1, 0x84, 0xba, 0xd0, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x35, 0x7a, 0xab, 0xaa, - 0xbe, 0xd7, 0xad, 0x22, 0x99, 0x46, 0xbb, 0x78, 0xfd, 0x47, 0x8f, 0x2a, - 0x4a, 0xa6, 0x2f, 0x8d, 0x15, 0x07, 0xed, 0x26, 0x1d, 0xb3, 0x12, 0xd3, - 0x88, 0x0f, 0xf1, 0x75, 0x2a, 0x07, 0x62, 0xac, 0xbf, 0x52, 0x4a, 0xc3, - 0x12, 0xe5, 0x3c, 0xea, 0xa6, 0x1e, 0x57, 0x90, 0x56, 0x60, 0x7d, 0xcf, - 0x4b, 0x65, 0xaf, 0xee, 0x17, 0x56, 0xbe, 0xd2, 0x38, 0x3f, 0xd6, 0xbc, - 0xef, 0xa7, 0x32, 0xb7, 0x10, 0xe9, 0xbd, 0x97, 0x45, 0x92, 0x3c, 0xd3, - 0x35, 0x2e, 0x59, 0x37, 0x65, 0x5c, 0x7f, 0xd0, 0x99, 0x9c, 0x01, 0xe9, - 0x1f, 0x65, 0xe9, 0xec, 0x0f, 0x2d, 0x46, 0xbc, 0xd4, 0x8f, 0x51, 0x1c, - 0xa0, 0xa4, 0x9b, 0x4f, 0x95, 0x54, 0xb0, 0x50, 0x74, 0xfa, 0x0f, 0xe6, - 0x55, 0x81, 0xce, 0x0f, 0xd1, 0x25, 0x56, 0xc8, 0x2f, 0x3a, 0x65, 0xd4, - 0x86, 0x4a, 0x8e, 0xff, 0x5a, 0xcc, 0x67, 0x96, 0xcc, 0x65, 0x0d, 0x20, - 0xee, 0xba, 0x6b, 0xcb, 0xde, 0x10, 0x2f, 0xbf, 0x67, 0x6d, 0xbe, 0xef, - 0x72, 0xfc, 0x25, 0x62, 0xbf, 0xbb, 0xc5, 0xe0, 0x7b, 0x4c, 0x32, 0xc5, - 0xdb, 0x9f, 0xb5, 0xe2, 0x75, 0x8a, 0xba, 0xbb, 0x69, 0x28, 0xb6, 0x41, - 0x25, 0x83, 0x67, 0x35, 0x1b, 0xd7, 0xb3, 0xd7, 0x58, 0x54, 0x8a, 0x0b, - 0x7c, 0xf3, 0x05, 0xcf, 0x2c, 0x78, 0x70, 0xc6, 0xed, 0x7e, 0x56, 0xb6, - 0x4e, 0x48, 0xaa, 0x57, 0xc4, 0xb0, 0xb2, 0xa0, 0xca, 0x50, 0xe1, 0xc7, - 0x41, 0xea, 0xac, 0x5f, 0x18, 0x13, 0xe5, 0x85, 0x78, 0x3f, 0x05, 0xf3, - 0xfd, 0x74, 0x7a, 0x42, 0x61, 0x91, 0x19, 0xc6, 0x19, 0xe9, 0xd2, 0x78, - 0x2c, 
0xb1, 0xa3, 0x7f, 0x62, 0xea, 0x2a, 0x35, 0x1c, 0x55, 0xa3, 0xf7, - 0xdc, 0xec, 0x48, 0x23, 0x99, 0x8d, 0xe1, 0x4d, 0x45, 0xad, 0x92, 0xc6, - 0xf4, 0xa2, 0xe5, 0xe6, 0x58, 0xe4, 0xd5, 0x37, 0xd0, 0x47, 0x0b, 0x64, - 0x68, 0x48, 0x7e, 0xeb, 0xbe, 0x5e, 0x74, 0xd1, 0xc4, 0xa5, 0x60, 0xd0, - 0x30, 0x62, 0xbc, 0x81, 0xc4, 0x01, 0x68, 0x18, 0xf3, 0xac, 0x9d, 0xb1, - 0x4d, 0xdd, 0x8b, 0xd2, 0x54, 0x5d, 0xd1, 0x1c, 0xee, 0x75, 0x9e, 0x99, - 0x42, 0x69, 0x38, 0xcc, 0x66, 0x24, 0xd9, 0x8f, 0x70, 0x98, 0xc3, 0x5e, - 0x08, 0xf0, 0xd8, 0x2d, 0xe6, 0x52, 0x48, 0xdf, 0xd0, 0x03, 0x04, 0x92, - 0xab, 0xa1, 0xa1, 0x2f, 0x7d, 0x84, 0xb2, 0x82, 0x51, 0x56, 0x74, 0x4a, - 0x94, 0xff, 0xd2, 0xe4, 0x4e, 0x1a, 0xbd, 0x18, 0xab, 0x33, 0x68, 0x0e, - 0x4f, 0x99, 0x1d, 0x7e, 0x02, 0x3f, 0x1f, 0x50, 0x05, 0xf8, 0x59, 0x47, - 0x97, 0x98, 0x60, 0xb1, 0x30, 0xb1, 0x14, 0xac, 0x2c, 0x0a, 0xa8, 0x97, - 0x83, 0xf5, 0x5a, 0x5c, 0x87, 0xe5, 0x36, 0x26, 0xec, 0xb4, 0x94, 0x46, - 0x9a, 0xad, 0x2b, 0x9a, 0xb7, 0xac, 0xc4, 0x1a, 0x55, 0x53, 0xc0, 0x16, - 0x91, 0x1c, 0xd6, 0xaa, 0x6b, 0xdd, 0x85, 0x6a, 0x54, 0xec, 0x7c, 0xa1, - 0xd5, 0x18, 0x00, 0x74, 0xd2, 0xf1, 0x7e, 0xad, 0x7c, 0xa8, 0x85, 0x9b, - 0xc0, 0x9f, 0x4f, 0x3b, 0xd9, 0x08, 0xc8, 0x9d, 0x31, 0x22, 0x7a, 0x53, - 0xa8, 0xbd, 0x00, 0xdf, 0xe8, 0x39, 0x52, 0xe9, 0x14, 0x74, 0x7b, 0x53, - 0xf9, 0xbd, 0x29, 0x8e, 0x5d, 0xf2, 0x35, 0x3b, 0xe3, 0x48, 0xbf, 0xa0, - 0xc4, 0x3d, 0x40, 0xb4, 0xf2, 0x7c, 0xd0, 0xe3, 0x17, 0x11, 0x5b, 0xd6, - 0x55, 0xd2, 0x54, 0xcf, 0x20, 0x8d, 0x74, 0x4a, 0x6b, 0xe9, 0x5d, 0xfe, - 0x72, 0x14, 0x6a, 0x11, 0x8b, 0x14, 0x19, 0xba, 0x63, 0xe4, 0x6b, 0x39, - 0xb4, 0x90, 0x67, 0x79, 0x56, 0x31, 0xd3, 0xb5, 0xeb, 0x9e, 0x95, 0x4b, - 0x1e, 0x04, 0x20, 0xd8, 0xbe, 0xe8, 0x1c, 0xd7, 0x95, 0xcb, 0x57, 0x60, - 0xe6, 0x11, 0x35, 0x42, 0x90, 0xfd, 0xb2, 0xe4, 0x9b, 0x24, 0x70, 0xc0, - 0xc3, 0xa9, 0x8a, 0xc9, 0x46, 0xd0, 0xea, 0xc9, 0x93, 0x7d, 0x9f, 0x64, - 0x12, 0x54, 0x09, 0xb7, 0xc2, 0x4d, 0x6e, 0xcc, 0x60, 0x07, 0x36, 0x31, - 0x64, 0x3d, 0x1e, 0xd3, 0x86, 0x47, 0x47, 0x42, 0x76, 0xb6, 0xf0, 0xe5, - 0xb4, 0xe7, 0xbe, 0x47, 0x91, 0x78, 0xbe, 0x06, 0xf1, 0x6e, 0x58, 0xce, - 0x32, 0x13, 0x26, 0x34, 0x92, 0xae, 0xb2, 0x29, 0xd0, 0x30, 0x55, 0xfd, - 0x89, 0x6a, 0xbf, 0x3e, 0xdf, 0x11, 0x39, 0xe4, 0xfd, 0x56, 0xd7, 0x2f, - 0x89, 0x96, 0x08, 0x54, 0xaa, 0xab, 0x8b, 0xfa, 0x65, 0xe5, 0x64, 0xff, - 0x24, 0x25, 0x8f, 0x7d, 0xf6, 0xb1, 0x7f, 0x2f, 0xa6, 0xf6, 0x46, 0xab, - 0x61, 0xfd, 0x47, 0xad, 0x6d, 0x38, 0x6d, 0xc1, 0xe9, 0x4a, 0xf1, 0x85, - 0x05, 0x0e, 0x69, 0x48, 0x7c, 0xa6, 0x76, 0x61, 0xe3, 0x94, 0xf2, 0xd6, - 0x7a, 0x9c, 0x79, 0xc0, 0x2a, 0x51, 0x23, 0xc6, 0xaf, 0x29, 0x04, 0x0f, - 0x47, 0xc2, 0x93, 0xd7, 0x64, 0xe5, 0x37, 0x2e, 0x53, 0x3b, 0xb7, 0x7c, - 0x9c, 0xb4, 0x63, 0x13, 0xc7, 0x56, 0x90, 0xe9, 0x53, 0xd5, 0x86, 0x2b, - 0x96, 0x41, 0x42, 0x56, 0xc5, 0x16, 0xd7, 0x9e, 0x30, 0xce, 0xa1, 0x0d, - 0x93, 0x5d, 0x11, 0x07, 0xb2, 0x95, 0xfd, 0xf6, 0x0b, 0x28, 0x95, 0x1a, - 0x8f, 0xfa, 0xe1, 0x57, 0x7e, 0x06, 0xff, 0x18, 0xaf, 0xe3, 0x4f, 0x3c, - 0x34, 0x5b, 0xd4, 0x46, 0x1a, 0xd1, 0xd1, 0x7e, 0x55, 0xba, 0x5d, 0x2a, - 0x1f, 0x42, 0x49, 0x95, 0x75, 0x5f, 0x80, 0x60, 0x02, 0x01, 0xdb, 0x36, - 0xad, 0x68, 0x69, 0x1e, 0x0b, 0x90, 0x3f, 0xa6, 0xb6, 0x2f, 0x66, 0xa6, - 0x7d, 0x81, 0x8c, 0xa0, 0xee, 0x05, 0x95, 0xbc, 0xb3, 0x7c, 0x18, 0xd4, - 0x1b, 0x40, 0x96, 0xf5, 0x05, 0x9d, 0x27, 0x3b, 0x78, 0xfc, 0x19, 0x18, - 0xc0, 0x61, 0xa0, 0xd6, 0xf9, 0xc0, 0x3f, 0xe5, 0x48, 0x35, 0x0f, 0x8b, - 0x0d, 0xfb, 0x31, 0xb7, 0x32, 0x40, 0x1d, 0x69, 0x12, 0x5a, 0x23, 0xf0, - 0xce, 
0xe9, 0x5e, 0xa6, 0x68, 0x6b, 0xe1, 0xe2, 0x68, 0x07, 0x02, 0x0d, - 0x7a, 0xc2, 0x0a, 0x40, 0x10, 0x5e, 0x94, 0xba, 0x77, 0x1d, 0xf7, 0xac, - 0xec, 0x79, 0xa9, 0xa1, 0x8a, 0xb8, 0x49, 0x32, 0x08, 0xe0, 0x18, 0xa8, - 0x3d, 0x69, 0x41, 0x5d, 0x30, 0x3b, 0xb6, 0x91, 0x46, 0x8d, 0x81, 0x10, - 0xb0, 0xc2, 0xed, 0xa0, 0x4e, 0x59, 0x48, 0xd8, 0x64, 0x7d, 0x2d, 0x46, - 0xf2, 0x8a, 0x2e, 0x5d, 0x0c, 0x4d, 0x9f, 0xfe, 0x7b, 0x5e, 0xbf, 0x1a, - 0x78, 0xdf, 0xfc, 0x0f, 0x04, 0x37, 0x72, 0x1a, 0x09, 0xb8, 0x6e, 0x1b, - 0xf1, 0x18, 0x7d, 0x83, 0x44, 0xaa, 0x9b, 0x71, 0xe1, 0x03, 0x04, 0x83, - 0xe5, 0xaa, 0xc0, 0xd4, 0xa7, 0x80, 0x10, 0x35, 0x09, 0xae, 0xf7, 0xe1, - 0x5e, 0x7c, 0x31, 0x20, 0x43, 0x82, 0xda, 0x07, 0x39, 0xfe, 0x8f, 0x9d, - 0x70, 0x3c, 0x57, 0x43, 0x01, 0x51, 0x37, 0x2e, 0x97, 0xef, 0xcf, 0x05, - 0x44, 0x75, 0x69, 0xf7, 0xdb, 0xda, 0x80, 0x78, 0x0c, 0xcc, 0xc1, 0x49, - 0xac, 0x3b, 0x7e, 0x27, 0x6a, 0xbb, 0xdf, 0x45, 0x5b, 0x3b, 0x29, 0xf6, - 0x1b, 0xa9, 0x25, 0xf9, 0x2f, 0xcf, 0x37, 0x71, 0x33, 0xb4, 0x90, 0xd7, - 0x9b, 0x87, 0x41, 0x15, 0xd1, 0xa6, 0x39, 0xa7, 0xa9, 0xcd, 0x66, 0x29, - 0x59, 0xb4, 0x53, 0x12, 0xa1, 0x20, 0xd5, 0x04, 0xca, 0x40, 0x31, 0xfa, - 0x6f, 0xbb, 0x92, 0x04, 0xf3, 0xc2, 0x10, 0x0d, 0xc1, 0x19, 0x78, 0x8c, - 0x82, 0xed, 0x92, 0x3a, 0x6b, 0xd1, 0x3d, 0xe8, 0xac, 0x55, 0xe4, 0x8c, - 0xc6, 0xd4, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0xc2, 0x1d, 0x86, 0xe4, 0xf6, 0xa1, 0xbe, 0xf5, 0xf3, 0x36, 0x9d, 0x32, - 0x80, 0x17, 0x3b, 0x1f, 0x18, 0x21, 0xed, 0xa7, 0xf5, 0xaf, 0xf1, 0x94, - 0xe2, 0xa7, 0x08, 0xd5, 0xca, 0x18, 0x45, 0xf5, 0x68, 0x94, 0x82, 0x61, - 0xf7, 0xb7, 0xb2, 0xfa, 0xd4, 0x5e, 0x32, 0xd0, 0xf0, 0x20, 0x66, 0x83, - 0xd1, 0x6b, 0x3c, 0xdf, 0x73, 0xeb, 0x73, 0x82, 0x09, 0x9b, 0xd0, 0xc5, - 0xb0, 0x9f, 0x01, 0x77, 0x85, 0xcc, 0x6e, 0x23, 0xb7, 0x00, 0x45, 0xe0, - 0xa6, 0x01, 0x29, 0x1d, 0x8b, 0xc4, 0xe0, 0xc2, 0xe0, 0x4f, 0x3b, 0x07, - 0xd5, 0xac, 0x6b, 0x88, 0xb8, 0xa4, 0xe2, 0x5c, 0x19, 0xe9, 0x98, 0x72, - 0xa5, 0x6b, 0xf5, 0xa4, 0xf7, 0x15, 0xaf, 0xfb, 0xb4, 0x80, 0x9a, 0xe3, - 0xa5, 0x35, 0x2f, 0x45, 0x81, 0xf1, 0x8b, 0x2d, 0x26, 0x5c, 0x65, 0xa9, - 0x5b, 0x6e, 0x83, 0xc3, 0x62, 0x2f, 0x84, 0xef, 0x11, 0xa5, 0x58, 0x48, - 0xe9, 0x67, 0x7e, 0xd3, 0x0b, 0x5d, 0x51, 0x80, 0x39, 0x08, 0x8e, 0xc1, - 0x0d, 0x04, 0x11, 0x5f, 0x72, 0x64, 0x1f, 0x83, 0xf8, 0xd3, 0x09, 0x38, - 0xb6, 0x7f, 0x50, 0x78, 0x27, 0x20, 0xe5, 0xbd, 0x16, 0xbf, 0x51, 0xd8, - 0x4f, 0x67, 0x60, 0xf6, 0x9e, 0xff, 0x08, 0xfe, 0xc6, 0x96, 0xd6, 0x64, - 0x94, 0x28, 0xc6, 0x9a, 0x09, 0x1a, 0x34, 0x08, 0x31, 0x4b, 0x0b, 0x97, - 0x5a, 0x18, 0x72, 0x49, 0xe9, 0x1d, 0xbb, 0x9c, 0xed, 0x7e, 0xb5, 0xc5, - 0xa7, 0xf4, 0x25, 0x7a, 0x26, 0xe9, 0x15, 0x61, 0x85, 0x32, 0xc9, 0xb3, - 0xcf, 0x95, 0xbf, 0x35, 0x10, 0x2d, 0x71, 0xfe, 0x03, 0xd6, 0x69, 0x75, - 0x8d, 0xb7, 0x16, 0xa7, 0x3d, 0x0e, 0xb7, 0x55, 0x6d, 0xa7, 0x9f, 0x10, - 0x7e, 0x7e, 0xff, 0x39, 0xee, 0x8e, 0xa7, 0x81, 0x7d, 0x11, 0xea, 0xa9, - 0xd6, 0xed, 0x54, 0xf8, 0xd2, 0xd5, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xf9, 0xde, 0x41, 0xe7, 0xa6, 0x88, 0x53, 0x76, - 0x5a, 0x26, 0xc3, 0x5c, 0xf2, 0x58, 0x68, 0x9c, 0xc7, 0x4e, 0x53, 0x18, - 0x53, 0x67, 0x39, 0x23, 0x96, 0xb0, 0xef, 0x58, 0x29, 0xe1, 0x68, 0xd8, - 0xce, 0xc0, 0x41, 0xc2, 0x35, 0x5f, 0x74, 0xfa, 0xdf, 0xc7, 0x0f, 0x80, - 0x50, 0xd1, 0xf6, 0x5a, 0x3a, 0x81, 0xe0, 0xd9, 0x9b, 0x47, 0x96, 0xcd, - 0xc5, 0x0f, 0x91, 0x12, 0x81, 0x77, 0x1e, 0xef, 0x2e, 0xba, 0x16, 0x51, - 0x70, 0x78, 0xdc, 0xa3, 0x84, 0x12, 0x7c, 0x9e, 0x21, 0x7d, 0xa3, 0x5f, - 0xce, 
0xa1, 0x25, 0x84, 0x99, 0xa4, 0x2d, 0xa6, 0x0f, 0x95, 0xef, 0xef, - 0x31, 0xe6, 0xf2, 0x18, 0x08, 0x47, 0xd2, 0x5a, 0x39, 0x01, 0x7a, 0xca, - 0xd3, 0x03, 0xb1, 0xc2, 0x48, 0xf4, 0x1f, 0x6d, 0xc2, 0x8c, 0x5c, 0xda, - 0xf5, 0x10, 0xed, 0xfc, 0x2e, 0x0c, 0xb3, 0x52, 0xaa, 0xa9, 0xed, 0xbc, - 0x41, 0xcc, 0xd4, 0x4b, 0x1c, 0xd0, 0xa3, 0x1d, 0xf4, 0xe7, 0x48, 0x34, - 0x4e, 0xcf, 0x3b, 0xb3, 0x71, 0x06, 0xbe, 0x0c, 0x35, 0xbb, 0xb4, 0x17, - 0xd8, 0x8b, 0xba, 0xdd, 0x32, 0x30, 0x51, 0xb1, 0xb1, 0xd6, 0x3a, 0xdc, - 0x3b, 0x25, 0x9a, 0x57, 0xc7, 0x4d, 0xd3, 0x75, 0x93, 0x59, 0x3e, 0x9b, - 0x10, 0xcf, 0xdb, 0x38, 0x75, 0x51, 0xb2, 0x2a, 0x48, 0x78, 0xfc, 0xaa, - 0xe3, 0x91, 0xe7, 0x93, 0xe7, 0x0a, 0x07, 0x2c, 0xf8, 0x88, 0x93, 0xde, - 0x2f, 0xba, 0x7b, 0x72, 0xcd, 0x92, 0xdd, 0xb1, 0xac, 0x1e, 0xe4, 0xe3, - 0x5d, 0xa4, 0x7f, 0x86, 0xa7, 0xcb, 0xb5, 0x81, 0x86, 0xf1, 0xf5, 0xad, - 0xd6, 0x36, 0x08, 0x09, 0x9f, 0x75, 0x6f, 0x4a, 0x5b, 0x30, 0xf8, 0xaf, - 0xd2, 0xbc, 0xb5, 0xbe, 0xf2, 0xeb, 0x9b, 0xbc, 0x11, 0xd4, 0x0c, 0x14, - 0xa6, 0x6f, 0x43, 0xd3, 0xc9, 0x4e, 0xca, 0x9b, 0x4e, 0x46, 0x60, 0x4c, - 0x63, 0xcc, 0x07, 0x36, 0x8c, 0xf2, 0xd1, 0x93, 0x7a, 0x51, 0x49, 0x15, - 0xbf, 0xbf, 0x9e, 0x82, 0x21, 0x06, 0xa0, 0x39, 0x11, 0x1d, 0x6c, 0x41, - 0x72, 0xcd, 0x2a, 0x8a, 0x4a, 0xd0, 0x13, 0x6c, 0x56, 0xf4, 0x00, 0x48, - 0xaf, 0xab, 0xdf, 0xa9, 0xe9, 0xa6, 0xaa, 0x06, 0x61, 0x79, 0xc4, 0x57, - 0x42, 0xca, 0x12, 0x18, 0xcf, 0x81, 0xec, 0x79, 0x19, 0xd2, 0xd2, 0xe3, - 0x1d, 0xc6, 0x6c, 0xd0, 0xd6, 0x0a, 0xfb, 0x70, 0x42, 0x28, 0x25, 0x23, - 0xb6, 0x23, 0x15, 0x28, 0x5e, 0x9f, 0x49, 0xf2, 0x7b, 0x69, 0x74, 0xa5, - 0xb9, 0x26, 0x81, 0xfe, 0x39, 0x3e, 0x3f, 0xc8, 0x7e, 0x9e, 0x5e, 0x8e, - 0xf2, 0xdb, 0x6b, 0xfd, 0xe1, 0xc3, 0x01, 0x4a, 0xba, 0x8f, 0x33, 0x71, - 0x09, 0x80, 0x5d, 0x9c, 0x58, 0x64, 0xb7, 0x90, 0x13, 0x2a, 0xe9, 0x1d, - 0x07, 0x2c, 0x06, 0x70, 0x43, 0x0d, 0xb6, 0x57, 0x02, 0x3c, 0xbe, 0x3c, - 0x42, 0xab, 0x77, 0x15, 0x0e, 0x98, 0xfb, 0xf2, 0x1d, 0x14, 0xd9, 0xb8, - 0xd1, 0x59, 0x2a, 0x67, 0x6f, 0xfc, 0x59, 0x39, 0x33, 0xe0, 0x49, 0x0b, - 0x4e, 0x65, 0x81, 0x9f, 0x71, 0xf2, 0xa5, 0x90, 0x4f, 0x24, 0xc7, 0x05, - 0xfb, 0x77, 0x1e, 0x14, 0xca, 0x2f, 0xfc, 0xac, 0xec, 0xbf, 0xa2, 0x69, - 0x15, 0x0a, 0x6b, 0xa9, 0xa0, 0x74, 0xee, 0xad, 0xa9, 0x50, 0x4d, 0x4d, - 0xab, 0x6e, 0xc1, 0xb3, 0xda, 0xbb, 0xbd, 0xab, 0x00, 0x05, 0x14, 0xc1, - 0xc4, 0x53, 0x7b, 0x78, 0x97, 0x68, 0x3c, 0x05, 0xf2, 0xed, 0x87, 0xca, - 0x86, 0xd1, 0xdf, 0xda, 0xb3, 0x2f, 0x17, 0x87, 0x87, 0x2f, 0xd8, 0xe9, - 0xb2, 0x96, 0xdc, 0x7f, 0x22, 0xf1, 0x2a, 0x9f, 0xfe, 0x54, 0x55, 0xa1, - 0x96, 0xab, 0x9f, 0x61, 0x74, 0xcd, 0x4d, 0x77, 0x38, 0x02, 0x23, 0x29, - 0x28, 0x5b, 0xfc, 0x86, 0x17, 0x40, 0xd4, 0x42, 0x2a, 0x9b, 0x84, 0xf7, - 0x67, 0x2b, 0x3a, 0xc1, 0x31, 0x89, 0x4b, 0x67, 0xd1, 0x7d, 0x6b, 0x36, - 0xec, 0x69, 0x6b, 0x24, 0xca, 0xd6, 0x2d, 0xbb, 0x21, 0xc8, 0x0c, 0x53, - 0x41, 0x29, 0x0b, 0xc1, 0xfe, 0xd5, 0xa3, 0x4c, 0x66, 0x2f, 0xc7, 0xf1, - 0xa8, 0xc0, 0x3d, 0x9a, 0xb9, 0x09, 0x50, 0x3f, 0x09, 0x87, 0xa4, 0x3f, - 0x7a, 0x33, 0xef, 0xf0, 0xfb, 0x77, 0x02, 0x7d, 0x92, 0xaf, 0x73, 0xaa, - 0xcc, 0x3f, 0x66, 0x56, 0xd0, 0x21, 0xd1, 0xe8, 0x0e, 0x47, 0x03, 0x5e, - 0x3b, 0xe9, 0xa2, 0xe3, 0x83, 0x0b, 0x73, 0xd3, 0xaa, 0x94, 0x80, 0xef, - 0x7c, 0xdf, 0xde, 0x86, 0xc3, 0xa9, 0x62, 0x34, 0x76, 0xee, 0x4d, 0x15, - 0x73, 0x7b, 0xd7, 0x6d, 0xd4, 0x21, 0x05, 0xd4, 0xcf, 0xf3, 0x54, 0xdc, - 0x49, 0x5f, 0x5a, 0x2a, 0x37, 0x19, 0x89, 0x61, 0x1d, 0x95, 0x17, 0x8b, - 0x09, 0x95, 0x5d, 0x9f, 0xde, 0x86, 0x03, 0x93, 0x76, 0xec, 0x54, 0xec, - 0x13, 
0xc3, 0xf9, 0x38, 0x8f, 0xa9, 0x11, 0xf0, 0x9a, 0x0e, 0x5e, 0x38, - 0x69, 0xeb, 0x62, 0x41, 0x9e, 0xd0, 0x1b, 0x59, 0x8c, 0xfd, 0x16, 0xfa, - 0xd8, 0x99, 0x0d, 0x83, 0x7e, 0xba, 0x5b, 0xc6, 0x59, 0xe1, 0xae, 0xba, - 0xb9, 0xb8, 0xba, 0xa5, 0x4d, 0x20, 0x00, 0xc9, 0x0c, 0xe1, 0x77, 0xdf, - 0xc4, 0x95, 0xca, 0x7c, 0xa5, 0xef, 0x0a, 0xed, 0x9b, 0x31, 0x06, 0xe1, - 0xc9, 0xa3, 0x88, 0x0a, 0xcc, 0x3d, 0xc8, 0xb6, 0x01, 0xe2, 0xa9, 0x29, - 0x03, 0x8a, 0x28, 0xf8, 0x0d, 0x70, 0x77, 0xb9, 0xe1, 0x1b, 0x06, 0x19, - 0x86, 0xc1, 0xd3, 0xcf, 0x6b, 0x9c, 0x09, 0x70, 0x50, 0xed, 0xb5, 0xf6, - 0x69, 0xcc, 0xac, 0x30, 0x6a, 0x1f, 0x1d, 0xe6, 0x75, 0x33, 0xab, 0x55, - 0x48, 0xfa, 0x81, 0xb8, 0x06, 0x3a, 0x78, 0xee, 0xde, 0xef, 0xe2, 0x17, - 0xc4, 0x3e, 0xe5, 0x22, 0xa7, 0xd1, 0x45, 0x5b, 0x57, 0xb0, 0xde, 0x69, - 0x30, 0xd1, 0x9a, 0xd7, 0x6b, 0x0e, 0x7a, 0x30, 0x0d, 0xb5, 0xec, 0x60, - 0xa7, 0x05, 0x87, 0x42, 0x4b, 0x92, 0x1f, 0x68, 0x8e, 0x1a, 0x90, 0x84, - 0x27, 0x2a, 0xc0, 0xd2, 0xff, 0xbc, 0x8e, 0x34, 0x53, 0x9d, 0x04, 0x50, - 0xcb, 0x79, 0xd9, 0x55, 0xd5, 0x4d, 0x3c, 0xe2, 0xb4, 0x9b, 0x57, 0x07, - 0x1f, 0xce, 0xd0, 0xa7, 0x84, 0xe1, 0xb7, 0x3a, 0xaf, 0xc5, 0x67, 0x64, - 0xbc, 0x02, 0xbe, 0xb0, 0x65, 0x7e, 0xb0, 0x4c, 0xc2, 0x2d, 0xcd, 0xf8, - 0x60, 0xcb, 0xfe, 0xd1, 0x8d, 0x14, 0x5a, 0xd3, 0x38, 0xd4, 0x71, 0x5a, - 0xca, 0xbb, 0xfe, 0x0e, 0x54, 0xf9, 0xb4, 0x25, 0xa5, 0x71, 0x13, 0x95, - 0x14, 0xdc, 0x86, 0xb8, 0x21, 0xa7, 0x2e, 0x13, 0xc6, 0x2f, 0xce, 0xe7, - 0x6c, 0xb8, 0x0d, 0xc9, 0xe4, 0xc4, 0x64, 0x12, 0x78, 0x1c, 0x95, 0x92, - 0xc2, 0xec, 0xaa, 0xd3, 0xc3, 0x3a, 0xd2, 0xe8, 0x95, 0xf0, 0x6b, 0x03, - 0x8c, 0xcf, 0x6b, 0xdb, 0x21, 0xa0, 0xcf, 0xf4, 0x05, 0xc8, 0xe7, 0x77, - 0x05, 0x55, 0x7b, 0x6b, 0xfa, 0x96, 0xf1, 0x7c, 0x30, 0x62, 0x75, 0xbe, - 0x6e, 0xea, 0xba, 0x9f, 0x40, 0x2e, 0x9a, 0x86, 0x93, 0xcc, 0x38, 0xf7, - 0xee, 0xd8, 0xbb, 0x24, 0xcd, 0x85, 0x3e, 0x85, 0x16, 0x8c, 0x33, 0x23, - 0x73, 0xe6, 0x43, 0xc4, 0x67, 0xbf, 0xef, 0x85, 0xb1, 0x44, 0xf9, 0x55, - 0x93, 0x4d, 0x0b, 0x8e, 0xc1, 0x42, 0x13, 0xc6, 0xc8, 0x09, 0x63, 0xab, - 0xb3, 0xc7, 0xc4, 0xa4, 0x8b, 0x72, 0xfb, 0xa5, 0x99, 0xa1, 0x5d, 0x07, - 0x02, 0x82, 0x56, 0x11, 0x3c, 0xc2, 0x5a, 0x55, 0xf9, 0x3a, 0x93, 0x61, - 0x89, 0x46, 0xb7, 0x6a, 0x42, 0x76, 0x1e, 0x70, 0xde, 0xd9, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x32, 0xc1, 0x61, 0xaa, - 0xdb, 0xe9, 0xae, 0x88, 0xcb, 0xf7, 0x28, 0xdd, 0x82, 0x62, 0x61, 0x41, - 0x4e, 0xbb, 0xf9, 0xb7, 0xe8, 0x81, 0x99, 0x18, 0xe2, 0xa7, 0xb4, 0x7c, - 0xb7, 0x08, 0x44, 0x6f, 0x24, 0xb3, 0xda, 0x57, 0x62, 0x29, 0xc7, 0xa6, - 0x84, 0xb1, 0x5d, 0xc5, 0x00, 0x4c, 0x30, 0x16, 0xf0, 0x0a, 0x74, 0x73, - 0xec, 0xaf, 0xb5, 0xde, 0xb0, 0xa7, 0x75, 0x22, 0x8f, 0x9e, 0x43, 0x01, - 0x68, 0xae, 0x91, 0xeb, 0x46, 0x52, 0x3f, 0x2c, 0x4e, 0xc5, 0xd0, 0xc8, - 0x15, 0xea, 0x99, 0xc2, 0x37, 0x5b, 0x68, 0xb5, 0xce, 0x41, 0x92, 0xbf, - 0xd6, 0xdb, 0x85, 0xad, 0x08, 0xd1, 0x11, 0x93, 0xe8, 0xd4, 0x78, 0x43, - 0x3b, 0x7d, 0xcb, 0x42, 0x84, 0xf3, 0x61, 0x88, 0x9e, 0x6a, 0x73, 0xb9, - 0x78, 0x17, 0x9a, 0x9f, 0xfb, 0x97, 0xcb, 0xd6, 0xb5, 0x3f, 0x00, 0x41, - 0xb0, 0x30, 0x2f, 0x6f, 0x89, 0xdd, 0xfa, 0x13, 0xd1, 0x07, 0xbe, 0x2f, - 0xea, 0x91, 0x62, 0xaa, 0xed, 0xcb, 0xfd, 0x07, 0x82, 0xbb, 0x3f, 0xf4, - 0xa6, 0x94, 0x66, 0x71, 0x20, 0x61, 0xac, 0x84, 0x04, 0x70, 0xf2, 0xd3, - 0xdf, 0xac, 0x44, 0xfd, 0x47, 0x26, 0x81, 0x64, 0xb3, 0xa6, 0x90, 0x2b, - 0xd2, 0x2c, 0xd0, 0x77, 0x81, 0x53, 0x45, 0x78, 0x5f, 0x30, 0x77, 0x91, - 0x83, 0x13, 0x33, 0xd1, 0x91, 0xa6, 0x35, 0x21, 0xcb, 0x26, 0x54, 0x0a, - 0xf7, 
0x70, 0x5e, 0xdb, 0xd8, 0x92, 0xc7, 0xdf, 0xf9, 0x2a, 0x46, 0x91, - 0x22, 0x3b, 0xe6, 0xe1, 0x91, 0xeb, 0xa6, 0x78, 0x81, 0x57, 0xf3, 0x04, - 0xdf, 0x34, 0x55, 0x74, 0x0a, 0xfe, 0xf2, 0xbd, 0xb3, 0xeb, 0xa3, 0x8e, - 0x71, 0x15, 0xa9, 0x2f, 0x53, 0xe2, 0xa1, 0x45, 0xdf, 0xe8, 0x29, 0x40, - 0xf1, 0x4b, 0x23, 0xdb, 0x8e, 0xee, 0x19, 0xa8, 0xd4, 0x15, 0x90, 0x8c, - 0x04, 0x46, 0x81, 0x49, 0x92, 0xe5, 0xe1, 0xfe, 0x99, 0x06, 0xfc, 0x3e, - 0x43, 0x58, 0x3b, 0x19, 0x7f, 0xd2, 0x13, 0x65, 0xc2, 0x64, 0x27, 0x6d, - 0x93, 0x6a, 0xcf, 0x48, 0x2a, 0x3d, 0xdd, 0x79, 0x9f, 0x05, 0x32, 0xeb, - 0xfd, 0xb4, 0xd2, 0x1d, 0x16, 0x61, 0x3d, 0x17, 0x4c, 0xb8, 0xad, 0x63, - 0x0e, 0x6b, 0x8a, 0x4a, 0x34, 0x4c, 0xb5, 0x3c, 0x0f, 0x05, 0x28, 0x8c, - 0x8b, 0xdf, 0xf4, 0xa0, 0x49, 0xbf, 0x34, 0x6c, 0x6a, 0x5f, 0x40, 0x95, - 0x48, 0x4b, 0x93, 0x1e, 0x61, 0x6d, 0x58, 0xc3, 0x86, 0x98, 0x70, 0x11, - 0x4e, 0x44, 0x65, 0xc1, 0x0d, 0xea, 0x2f, 0xda, 0x38, 0x16, 0xbd, 0xd4, - 0x7b, 0x3e, 0x31, 0xee, 0x42, 0x4c, 0xdc, 0xe9, 0x8b, 0x1f, 0xa9, 0xcf, - 0xab, 0x60, 0xb5, 0xb1, 0xd2, 0xf2, 0x6a, 0xe9, 0xbc, 0xcc, 0xcb, 0x60, - 0x4a, 0xca, 0x70, 0x79, 0x64, 0x9d, 0x07, 0x1e, 0xdb, 0xef, 0x34, 0xaf, - 0x17, 0x93, 0x6b, 0x60, 0x73, 0x2d, 0x8c, 0x08, 0x27, 0x1e, 0x46, 0x9f, - 0xcb, 0x33, 0xdd, 0x76, 0xef, 0x17, 0x58, 0x9a, 0x5f, 0x82, 0x78, 0x0f, - 0xbf, 0xe7, 0x0f, 0x3a, 0x1e, 0xa8, 0x30, 0xbf, 0xff, 0xc7, 0xc7, 0x82, - 0x8b, 0xc3, 0x65, 0x04, 0xfd, 0x45, 0xc9, 0x88, 0x99, 0x8e, 0x44, 0xc5, - 0x23, 0x1e, 0xbf, 0xf1, 0x95, 0x70, 0x35, 0xe6, 0x56, 0x4a, 0x53, 0xb2, - 0xac, 0x0c, 0xfd, 0xf5, 0x61, 0x26, 0x5b, 0x70, 0xd6, 0x4c, 0xfc, 0x0f, - 0xcc, 0x53, 0x6e, 0x25, 0xca, 0x1d, 0x0c, 0x56, 0xf7, 0x9c, 0x95, 0xf6, - 0x3c, 0x08, 0x0c, 0x64, 0xb1, 0x1c, 0x5c, 0xe6, 0x25, 0xa4, 0xa3, 0xb7, - 0xaf, 0x8b, 0xbc, 0xe1, 0x68, 0xdf, 0x10, 0xab, 0xbb, 0xd5, 0x30, 0x64, - 0x42, 0xf6, 0xe6, 0x9a, 0xb5, 0x59, 0x12, 0x76, 0x92, 0xac, 0x29, 0xe9, - 0x45, 0xdb, 0x2e, 0x62, 0x22, 0x58, 0x24, 0x89, 0xc8, 0x6a, 0x2a, 0xa7, - 0x3f, 0x04, 0x53, 0x4e, 0x07, 0x41, 0x4e, 0x5f, 0x95, 0x5f, 0x6e, 0x14, - 0x5b, 0xa7, 0xa7, 0xd3, 0x5a, 0xa2, 0x95, 0x4a, 0xc8, 0xe9, 0x3c, 0x5a, - 0x84, 0x50, 0xbc, 0xe1, 0x9c, 0x7a, 0x16, 0xe5, 0xc7, 0x04, 0x9d, 0x60, - 0x2e, 0x7d, 0xb3, 0x77, 0x5d, 0x86, 0x2e, 0xac, 0x57, 0x2a, 0x31, 0x26, - 0x23, 0x6e, 0xcc, 0x7f, 0xb8, 0x36, 0x29, 0xa9, 0xa8, 0xd9, 0xc6, 0x75, - 0xee, 0x16, 0x23, 0x27, 0x0f, 0xe1, 0xb0, 0x3d, 0x91, 0x3a, 0x26, 0x4a, - 0x60, 0x72, 0x14, 0xf9, 0x3c, 0x66, 0x66, 0xe8, 0x7d, 0x4a, 0x6f, 0x7e, - 0x63, 0x58, 0x6a, 0x28, 0x78, 0x50, 0xef, 0x3b, 0x9d, 0xeb, 0xb6, 0x4b, - 0x5d, 0x55, 0x80, 0x84, 0x97, 0x9b, 0x74, 0x4b, 0x5c, 0x09, 0x1d, 0xe7, - 0x57, 0xfc, 0x40, 0x3f, 0xa9, 0xbd, 0xdf, 0x61, 0x2a, 0x89, 0x62, 0x51, - 0xfc, 0x24, 0xee, 0xee, 0x97, 0x10, 0xca, 0xb6, 0x0e, 0x8e, 0x71, 0x67, - 0x2a, 0x79, 0x4f, 0xc4, 0xe6, 0x3e, 0x27, 0xc2, 0x9b, 0x85, 0xfd, 0xde, - 0xfb, 0x58, 0x75, 0xf3, 0x1c, 0x31, 0xa2, 0x56, 0x3e, 0xdc, 0x24, 0xf4, - 0x4f, 0xcb, 0x5a, 0x1a, 0x77, 0x5c, 0x28, 0xd1, 0x5a, 0x55, 0xa9, 0x8c, - 0xb5, 0xdd, 0x77, 0x93, 0x58, 0xd8, 0x2f, 0x7d, 0x5a, 0x67, 0xa1, 0x95, - 0x0a, 0xd2, 0x6a, 0x93, 0xa6, 0xf0, 0x5f, 0x7f, 0x0a, 0x29, 0xdb, 0x1d, - 0x8c, 0xa7, 0x12, 0x0a, 0xf4, 0xc9, 0xcd, 0x70, 0xd1, 0xbd, 0x48, 0xd4, - 0x9a, 0xbb, 0xbb, 0x24, 0xbf, 0x52, 0x25, 0xb9, 0x75, 0xc2, 0x17, 0x36, - 0x6f, 0x4a, 0xc0, 0x53, 0x6d, 0x38, 0xfb, 0x7a, 0x60, 0xc8, 0x5d, 0x03, - 0xc1, 0x1c, 0x0c, 0x31, 0xf0, 0x59, 0xed, 0x0a, 0x5f, 0x84, 0xf2, 0x89, - 0x6c, 0xb4, 0xd5, 0x24, 0x2d, 0x2a, 0xda, 0xbe, 0x74, 0x1d, 0x22, 0xe2, - 0xc6, 
0xf0, 0x9b, 0x98, 0x5a, 0x41, 0x11, 0x4c, 0x51, 0x97, 0x16, 0xa7, - 0xc9, 0xd8, 0x53, 0x12, 0x53, 0xdd, 0x22, 0xa9, 0xf2, 0xae, 0x52, 0x49, - 0x02, 0xf9, 0x5c, 0x78, 0x00, 0xa2, 0x64, 0xff, 0x91, 0x62, 0x20, 0x6a, - 0x87, 0x6a, 0x40, 0x01, 0x85, 0x30, 0xf5, 0xdd, 0xa7, 0x64, 0x0a, 0x85, - 0x8d, 0x37, 0x99, 0xcb, 0x03, 0xc8, 0x29, 0x56, 0x7e, 0x75, 0x4f, 0xa1, - 0xc3, 0x76, 0xce, 0xdb, 0xa3, 0xb4, 0x7e, 0x91, 0x95, 0xbe, 0x53, 0x0e, - 0x20, 0xc9, 0xe7, 0x71, 0x78, 0xad, 0x3d, 0x4c, 0xbb, 0x59, 0xb9, 0x77, - 0xcf, 0x7d, 0x7b, 0xff, 0x15, 0xdb, 0x1d, 0xae, 0x1f, 0xbe, 0x33, 0x88, - 0x01, 0x04, 0x95, 0xe5, 0xe9, 0x6a, 0x1c, 0xbf, 0xc8, 0xc3, 0x33, 0x3b, - 0xd8, 0x2f, 0x75, 0x4a, 0xc3, 0x6f, 0x09, 0x88, 0x26, 0x46, 0x90, 0x89, - 0x53, 0x12, 0x27, 0xc2, 0x7d, 0x23, 0x6b, 0xc4, 0xe3, 0x0a, 0x0f, 0xc2, - 0x86, 0x6d, 0x20, 0x35, 0x82, 0x33, 0xec, 0xdd, 0xa7, 0x6a, 0xc3, 0xa8, - 0x11, 0xdc, 0x02, 0xd9, 0x05, 0x1b, 0x04, 0x75, 0x92, 0x6c, 0x08, 0x9e, - 0x38, 0x72, 0xd9, 0x7d, 0x9b, 0xbc, 0xfd, 0xca, 0xb8, 0x06, 0x0e, 0x24, - 0x89, 0x90, 0xde, 0x52, 0xe4, 0xd1, 0xcc, 0x99, 0x87, 0x0b, 0x87, 0xbb, - 0x5c, 0xa9, 0xab, 0xec, 0xb5, 0xe4, 0xdd, 0x5d, 0xfa, 0xb1, 0x97, 0x5f, - 0x61, 0xf7, 0x58, 0xd6, 0x08, 0x02, 0xf2, 0x51, 0x7c, 0x7a, 0xe6, 0xf1, - 0xcb, 0x43, 0xd0, 0x21, 0x09, 0xb8, 0x82, 0xa9, 0x52, 0xd9, 0xa8, 0x7f, - 0x2b, 0xe1, 0x0f, 0x31, 0xbc, 0x16, 0xa2, 0xce, 0x35, 0x55, 0x2e, 0xd6, - 0xda, 0x38, 0xd9, 0xc2, 0x5e, 0xca, 0x27, 0xd9, 0xa6, 0xd6, 0x4b, 0xa2, - 0x73, 0xc4, 0xce, 0x66, 0x30, 0x60, 0xa2, 0x01, 0xfa, 0xc1, 0xd6, 0xc8, - 0xea, 0xdd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x70, 0xe2, 0x62, 0x68, 0xff, 0x60, 0x67, 0x64, 0x88, 0xdd, 0x81, 0x79, - 0x82, 0xf5, 0x46, 0xf9, 0x7e, 0x0e, 0xa9, 0x26, 0xf6, 0xcf, 0x5d, 0xef, - 0x10, 0x11, 0xe1, 0x71, 0x72, 0x77, 0xcf, 0x02, 0x7b, 0xf1, 0x6e, 0xc4, - 0xb4, 0xfa, 0x2a, 0x12, 0xfe, 0x7e, 0x3c, 0x66, 0xef, 0x41, 0x98, 0x3a, - 0x1f, 0xa9, 0x14, 0x8f, 0x46, 0x22, 0xa0, 0xc2, 0xee, 0x93, 0x25, 0x34, - 0xf2, 0xb7, 0x6d, 0x0a, 0x36, 0xde, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0xd4, 0x17, 0x62, 0x25, 0xfd, 0x5b, 0x75, 0xeb, - 0xec, 0x06, 0xc9, 0x39, 0x86, 0x6d, 0xc5, 0x60, 0x2d, 0x33, 0x3d, 0xce, - 0x6a, 0x9f, 0x07, 0x3b, 0xb9, 0x70, 0x0f, 0xc7, 0x13, 0x46, 0x35, 0x46, - 0x26, 0xe4, 0xbc, 0x6e, 0x54, 0x89, 0x29, 0xd5, 0xa4, 0x94, 0xa0, 0x3a, - 0x7a, 0x61, 0xcf, 0xd1, 0x48, 0x27, 0x7a, 0x72, 0x95, 0xde, 0x93, 0xd1, - 0x19, 0x1f, 0xc9, 0xc8, 0x8f, 0x0d, 0xce, 0x34, 0x03, 0x39, 0x0a, 0x92, - 0x16, 0x09, 0xc4, 0x49, 0xf9, 0x30, 0x2e, 0x19, 0xd1, 0x69, 0x7e, 0x78, - 0x00, 0x25, 0x30, 0x6f, 0x6b, 0xe1, 0xbe, 0xad, 0xb2, 0x05, 0xde, 0xc7, - 0xc2, 0xf7, 0xd5, 0xa7, 0x4d, 0x03, 0x6f, 0x6b, 0xcd, 0xcb, 0x42, 0xfa, - 0x88, 0x16, 0xd5, 0xa6, 0x60, 0x08, 0xd4, 0xa5, 0x5b, 0x3b, 0x7b, 0xa2, - 0xca, 0xa3, 0xa2, 0x5d, 0x63, 0x7f, 0xc0, 0x37, 0xc5, 0x7e, 0x99, 0x04, - 0x5d, 0x9a, 0xb9, 0xa5, 0xac, 0xd1, 0xe2, 0x5d, 0xb2, 0x2b, 0x7e, 0xbb, - 0xb9, 0x66, 0x13, 0xa7, 0x30, 0xbf, 0x80, 0x0c, 0x2b, 0x8d, 0x45, 0xe1, - 0x8d, 0x96, 0x25, 0x27, 0x47, 0x3d, 0x21, 0x7d, 0x1c, 0x42, 0xac, 0x31, - 0x26, 0x47, 0x59, 0xb3, 0x44, 0x85, 0xf2, 0x8e, 0x7d, 0x01, 0x96, 0x6d, - 0xb2, 0x64, 0xc3, 0xfc, 0xa7, 0x82, 0x06, 0x4a, 0x87, 0x75, 0x9b, 0x99, - 0x47, 0x7e, 0xa6, 0x4d, 0x2c, 0x36, 0xff, 0xac, 0x2b, 0x77, 0x96, 0x52, - 0x14, 0x8d, 0x07, 0x0d, 0x28, 0x9d, 0x84, 0xa2, 0xda, 0xd6, 0x45, 0x3a, - 0xd4, 0xe6, 0xb7, 0x9a, 0xf3, 0x34, 0xe3, 0xda, 0x39, 0xdf, 0x35, 0x9c, - 0xe4, 0x87, 0x55, 0xc8, 0x43, 0xd0, 0x61, 0x46, 0x52, 0x2f, 0x75, 0x63, - 0xbb, 
0x98, 0x97, 0xeb, 0xfb, 0x15, 0xaf, 0x8e, 0x96, 0xdc, 0xff, 0x0a, - 0x90, 0xda, 0x09, 0x63, 0x28, 0x7b, 0x92, 0x73, 0x0b, 0xd4, 0x2b, 0x72, - 0x2a, 0x86, 0x32, 0xc3, 0xc1, 0x3e, 0xe4, 0x2c, 0x07, 0x89, 0x53, 0xb7, - 0xfe, 0x78, 0x6c, 0x95, 0xb4, 0x62, 0x4d, 0x4b, 0xfe, 0x6c, 0xfc, 0x5e, - 0x4e, 0xa7, 0x8c, 0x07, 0x4f, 0x85, 0x27, 0xe0, 0x7b, 0xd9, 0x7a, 0xe5, - 0x1d, 0xbc, 0x36, 0xda, 0x8e, 0x21, 0xff, 0xb3, 0x60, 0x2c, 0x5e, 0x23, - 0x0f, 0xde, 0x3f, 0xae, 0xa5, 0x3a, 0x50, 0xa9, 0x99, 0x39, 0x45, 0xaf, - 0xd3, 0x5f, 0x4a, 0x15, 0xad, 0x9c, 0x66, 0x7f, 0x92, 0xe0, 0x02, 0x81, - 0x3e, 0x06, 0x6a, 0x5e, 0xd0, 0x0c, 0x42, 0xe7, 0xcf, 0xe2, 0xeb, 0xa3, - 0xe0, 0xf7, 0x2d, 0x8a, 0x21, 0xdb, 0x64, 0x28, 0x2a, 0xb3, 0x2b, 0xc4, - 0xc9, 0xd5, 0x60, 0xaf, 0xfc, 0x15, 0xa1, 0x44, 0x9c, 0x96, 0x04, 0x42, - 0x1c, 0x55, 0x8c, 0xa5, 0xce, 0x80, 0xce, 0x75, 0x64, 0xa9, 0xf6, 0xa5, - 0x5a, 0x0f, 0x8a, 0x4b, 0x8b, 0x72, 0xcf, 0x3e, 0xd7, 0xeb, 0xe1, 0xd0, - 0xd3, 0x2d, 0x04, 0x6c, 0x9e, 0x02, 0x75, 0x43, 0x5c, 0xc1, 0x57, 0x66, - 0xd9, 0x14, 0x5b, 0x08, 0x10, 0x44, 0x8d, 0x8e, 0x89, 0xd1, 0x65, 0x27, - 0x2a, 0x0b, 0x99, 0x6f, 0x09, 0xa6, 0x20, 0xa5, 0x75, 0x24, 0xe4, 0xf7, - 0xf5, 0xe0, 0xed, 0x79, 0x37, 0x18, 0x13, 0x1c, 0xd9, 0xd1, 0xf5, 0x69, - 0x0c, 0xa5, 0x02, 0xdf, 0x6a, 0xfd, 0x2e, 0x35, 0x8e, 0xd0, 0x41, 0x91, - 0x61, 0x0f, 0x5c, 0xdd, 0x70, 0xbf, 0x1c, 0x49, 0xcb, 0xe9, 0xc9, 0x33, - 0xc4, 0x99, 0x1e, 0x8b, 0x75, 0x48, 0xc2, 0x58, 0xa4, 0x70, 0x1f, 0xbb, - 0xcd, 0xd3, 0x0e, 0x79, 0x25, 0xbe, 0x53, 0xfa, 0x32, 0x32, 0xf6, 0xb9, - 0xf0, 0x0a, 0x52, 0x5b, 0xe0, 0x69, 0xff, 0x43, 0xda, 0x98, 0x1f, 0xee, - 0x54, 0x60, 0xf8, 0x24, 0x43, 0xc5, 0x37, 0x72, 0xd1, 0xfc, 0x99, 0x9a, - 0x3e, 0x24, 0xe0, 0xd9, 0xc2, 0x61, 0x47, 0xb3, 0x26, 0x09, 0x85, 0x74, - 0xa1, 0x2b, 0x4a, 0x70, 0xd0, 0x1b, 0x90, 0x03, 0x25, 0xd9, 0x22, 0xc2, - 0x16, 0x22, 0x3a, 0x62, 0x20, 0xd4, 0x13, 0xce, 0xa2, 0xc7, 0x02, 0xfb, - 0x9a, 0xbf, 0xf1, 0x1c, 0x80, 0x01, 0x97, 0x90, 0x7f, 0x5a, 0x98, 0x70, - 0x30, 0x61, 0x77, 0xe5, 0xd4, 0x3b, 0x03, 0x42, 0x57, 0x31, 0x5e, 0xc6, - 0x64, 0xe1, 0xf4, 0x64, 0x77, 0x21, 0x9b, 0x44, 0x1c, 0xd9, 0x8c, 0x95, - 0x8a, 0xf1, 0xcb, 0x82, 0xac, 0xc1, 0x26, 0x31, 0xf2, 0x22, 0x41, 0xab, - 0xbb, 0x23, 0xd3, 0x8d, 0xcc, 0x5c, 0x9d, 0x9b, 0x1d, 0x9c, 0x4d, 0xf3, - 0x62, 0xde, 0x15, 0x6a, 0x94, 0x8d, 0x24, 0xe7, 0x52, 0x8d, 0x2a, 0xa4, - 0x1d, 0x54, 0x5a, 0xda, 0xaf, 0xab, 0x05, 0x27, 0x4b, 0xbb, 0xb4, 0xda, - 0x0c, 0xb9, 0x20, 0xb3, 0xaf, 0x4a, 0xeb, 0x37, 0xe5, 0x43, 0xe4, 0xc1, - 0xf6, 0x9e, 0xf8, 0x6c, 0xd8, 0xa1, 0x0c, 0xf9, 0xd1, 0x4b, 0x96, 0xa0, - 0x6d, 0x38, 0x64, 0x41, 0xd3, 0x14, 0xfb, 0xad, 0x89, 0xa9, 0xf7, 0x36, - 0x01, 0x0f, 0xbe, 0x8e, 0xd7, 0x76, 0xc6, 0x70, 0x22, 0x32, 0x8b, 0x08, - 0xca, 0x95, 0xbf, 0xcf, 0x5e, 0xb8, 0xc0, 0x3f, 0xd9, 0xaa, 0x84, 0xab, - 0x30, 0x5b, 0xe3, 0x7a, 0x61, 0x32, 0xe5, 0x54, 0x01, 0x5e, 0xb6, 0x1c, - 0x9c, 0x78, 0x52, 0x2a, 0xa7, 0xf5, 0x29, 0xa6, 0x0f, 0x14, 0xa5, 0x3a, - 0x34, 0xd4, 0xf5, 0xc2, 0xb2, 0x8d, 0x12, 0x7b, 0x8a, 0x64, 0x00, 0xfd, - 0x02, 0x0e, 0x02, 0x26, 0x5a, 0xb9, 0xeb, 0xfd, 0x30, 0xce, 0x51, 0xec, - 0x5f, 0xbc, 0xee, 0x53, 0x21, 0xec, 0x0e, 0xee, 0xc4, 0x28, 0x1a, 0xec, - 0x2a, 0x39, 0x4e, 0xe1, 0x50, 0x11, 0x3f, 0x16, 0xdd, 0xbf, 0xaf, 0x3e, - 0xbe, 0xd4, 0xfe, 0x34, 0x1e, 0x62, 0x3f, 0x5a, 0xea, 0x05, 0xfc, 0xd5, - 0x45, 0x08, 0x47, 0xce, 0x38, 0x3f, 0x75, 0x7e, 0x0c, 0x3a, 0x2a, 0x14, - 0xa7, 0x61, 0xba, 0x3a, 0xa1, 0x41, 0xa2, 0x72, 0x19, 0xfa, 0x33, 0x43, - 0xa7, 0xf4, 0x4e, 0x5b, 0xf9, 0xb1, 0x45, 0x16, 0x57, 0x8e, 0xb1, 0xad, - 0x7d, 
[… removed file content elided: several hundred further lines of hex byte values (0x..-style C array initializers). The deleted data appears to be a neural-network model, most likely a TensorFlow Lite flatbuffer, embedded as a C byte array; the raw bytes themselves carry no reviewable information. …]
0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x58, 0x58, 0xce, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x49, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x06, 0x52, 0xc1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x52, 0xfb, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x2c, 0x00, 0x00, 0x00, - 0x44, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x9b, 0x9c, 0xe1, 0x39, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x9a, 0xfb, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x2c, 0x00, 0x00, 0x00, - 0x8c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xf8, 0xb6, 0xc3, 0x3b, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x7a, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x48, 0x00, 0x00, 0x00, - 0x6c, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x94, 0x8d, 0x93, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x06, 0xfa, 0x92, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x58, 0xfd, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x10, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x48, 0x00, 0x00, 0x00, 0xd4, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x7a, 0xf6, 0x5f, 0x3a, 0x01, 0x00, 0x00, 0x00, 0xba, 0xf4, 0xdf, 0x41, - 0x01, 0x00, 0x00, 0x00, 0xf4, 0x7c, 0xcf, 0xc1, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0xb2, 0xfc, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x30, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x2c, 0x00, 0x00, 0x00, 0xa4, 0xfc, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x46, 0x2f, 0xc4, 0x35, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xfa, 0xfc, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x34, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x30, 0x00, 0x00, 0x00, 0xec, 0xfc, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x8f, 0x3f, 0xe0, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x4a, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x06, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x2c, 0x00, 0x00, 0x00, - 0x3c, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x25, 0xd7, 0xa9, 0x3b, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x2a, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x48, 0x00, 0x00, 0x00, - 0x1c, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xc4, 0xf4, 0x39, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xf4, 0x1f, 0xe3, 0x41, 0x01, 0x00, 0x00, 0x00, 0xaa, 0x55, 0x8f, 0xc1, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xfa, 0xfd, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x2c, 0x00, 0x00, 0x00, 0xec, 0xfd, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x8b, 0x00, 0x4b, 0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x42, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x2c, 0x00, 0x00, 0x00, 0x34, 0xfe, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xd7, 0xdf, 0xc3, 0x3b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x22, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x48, 0x00, 0x00, 0x00, 0x14, 0xfe, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x68, 0xa8, 0x04, 0x3e, 0x01, 0x00, 0x00, 0x00, 0xc0, 0x23, 0x04, 0x42, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x18, 0x00, 0x14, 0x00, 0x13, 0x00, - 0x00, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x10, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x48, 0x00, 0x00, 0x00, 0x8c, 0xfe, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x3b, 0xda, 0x75, 0x3b, 0x01, 0x00, 0x00, 0x00, 0x4f, 0xd8, 0xf5, 0x42, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x2a, 0x61, 0xc2, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0x6a, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x30, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x2c, 0x00, 0x00, 0x00, 0x5c, 0xff, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xcf, 0x37, 0x69, 0x37, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xb2, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x30, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x2c, 0x00, 0x00, 0x00, 0xa4, 0xff, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0xd8, 0x72, 0x3b, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x18, 0x00, 0x14, 0x00, 0x13, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xd4, 0x42, 0x16, 0x3c, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x54, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xa8, 0x41, 0x5b, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x66, 0x66, 0x5a, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x60, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, - 0xb4, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, - 0x68, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, - 0x44, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, - 0x0c, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x96, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x72, 0x9e, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x19, - 0xa6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xae, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x1b, 0xb6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xbe, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xc6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xce, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xd6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xde, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x1b, 0xe6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0xfa, 0xff, 0xff, 0xff, 0x00, 0x1b, 0x06, 0x00, 0x06, 0x00, 0x05, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x09, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b}; - -const unsigned int g_keyword_scrambled_model_data_length = 34520; diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/add_n.cc 
b/code/components/tfmicro/tensorflow/lite/micro/kernels/add_n.cc
new file mode 100644
index 00000000..6f14c670
--- /dev/null
+++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/add_n.cc
@@ -0,0 +1,119 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/kernels/internal/reference/add_n.h"
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+
+namespace tflite {
+namespace {
+
+constexpr int kInputTensor0 = 0;
+constexpr int kOutputTensor = 0;
+
+TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
+  int num_inputs = NumInputs(node);
+  TF_LITE_ENSURE(context, num_inputs >= 2);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  const TfLiteTensor* input_tensor_first;
+  TF_LITE_ENSURE_OK(
+      context,
+      GetInputSafe(context, node, kInputTensor0, &input_tensor_first));
+  TfLiteTensor* output;
+  TF_LITE_ENSURE_OK(context,
+                    GetOutputSafe(context, node, kOutputTensor, &output));
+
+  // Check that all tensors have the same shape and type.
+  TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_tensor_first->type);
+  for (int i = kInputTensor0 + 1; i < num_inputs; ++i) {
+    const TfLiteTensor* input;
+    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
+    TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input));
+    TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type);
+  }
+
+  // Allocate scratch buffer space for a pointer to each tensor's data
+  // and store the scratch buffer index in the node's user_data.
+  if (output->type == kTfLiteFloat32) {
+    int scratch_index;
+    size_t scratch_size = sizeof(float*) * num_inputs;
+    TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena(
+                                   context, scratch_size, &scratch_index));
+    node->user_data =
+        reinterpret_cast<decltype(node->user_data)>(scratch_index);
+  } else {
+    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.",
+                       TfLiteTypeGetName(output->type));
+    return kTfLiteError;
+  }
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  return CalculateOpData(context, node);
+}
+
+template <typename T>
+void EvalAddN(TfLiteContext* context, TfLiteNode* node,
+              TfLiteEvalTensor* output) {
+  int num_inputs = NumInputs(node);
+
+  int scratch_index =
+      static_cast<int>(reinterpret_cast<intptr_t>(node->user_data));
+  void* scratch_buffer = context->GetScratchBuffer(context, scratch_index);
+  const T** all_inputs = static_cast<const T**>(scratch_buffer);
+  for (int i = 0; i < num_inputs; i++) {
+    const TfLiteEvalTensor* next_input =
+        tflite::micro::GetEvalInput(context, node, kInputTensor0 + i);
+    all_inputs[i] = tflite::micro::GetTensorData<T>(next_input);
+  }
+
+  reference_ops::AddN(tflite::micro::GetTensorShape(output), num_inputs,
+                      all_inputs, tflite::micro::GetTensorData<T>(output));
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  TfLiteEvalTensor* output =
+      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+  if (output->type == kTfLiteFloat32) {
+    EvalAddN<float>(context, node, output);
+  } else {
+    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.",
+                       TfLiteTypeGetName(output->type));
+    return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+TfLiteRegistration Register_ADD_N() {
+  return {/*init=*/nullptr,
+          /*free=*/nullptr,
+          /*prepare=*/Prepare,
+          /*invoke=*/Eval,
+          /*profiling_string=*/nullptr,
+          /*builtin_code=*/0,
+          /*custom_name=*/nullptr,
+          /*version=*/0};
+}
+
+}  // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/batch_to_space_nd.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
new file mode 100644
index 00000000..a6fa0462
--- /dev/null
+++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
@@ -0,0 +1,111 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kBlockShapeTensor = 1; +constexpr int kCropsTensor = 2; +constexpr int kOutputTensor = 0; + +// Currently, only 3D NHC and 4D NHWC input/output op_context are supported. +// In case of 3D input, it will be extended to 3D NHWC by adding W=1. +// The 4D array need to have exactly 2 spatial dimensions. +// TODO(b/149952582): Support arbitrary dimension in SpaceToBatchND. +const int kInputOutputMinDimensionNum = 3; +const int kInputOutputMaxDimensionNum = 4; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr && output != nullptr); + + TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* block_shape = + tflite::micro::GetEvalInput(context, node, kBlockShapeTensor); + const TfLiteEvalTensor* crops = + tflite::micro::GetEvalInput(context, node, kCropsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: + reference_ops::BatchToSpaceND( + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(block_shape), + tflite::micro::GetTensorData(block_shape), + tflite::micro::GetTensorShape(crops), + tflite::micro::GetTensorData(crops), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::BatchToSpaceND( + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(block_shape), + tflite::micro::GetTensorData(block_shape), + tflite::micro::GetTensorShape(crops), + tflite::micro::GetTensorData(crops), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace. 
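(Editor's note: the following is an illustrative sketch, not part of the diff.) The `Register_*()` factory functions added in these new kernel files (Register_ADD_N above, Register_BATCH_TO_SPACE_ND just below, Register_CAST and Register_CONV_2D further down) follow the standard TFLite-Micro pattern: each returns a `TfLiteRegistration` whose prepare/invoke hooks the interpreter reaches through an op resolver. Assuming the matching `Add...()` helpers exist in this snapshot's `micro_mutable_op_resolver.h` (those method names are an assumption, not shown in the diff), wiring the new kernels into an application looks roughly like this:

```cpp
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Resolver sized for the ops the model actually uses. Each Add...() call maps
// a builtin/custom op code onto the Register_...() function from these kernels.
static tflite::MicroMutableOpResolver<4> op_resolver;

void RegisterModelOps() {
  op_resolver.AddConv2D();          // -> Register_CONV_2D()
  op_resolver.AddAddN();            // -> Register_ADD_N()
  op_resolver.AddBatchToSpaceNd();  // -> Register_BATCH_TO_SPACE_ND()
  op_resolver.AddCast();            // -> Register_CAST()
  // The resolver is then handed to tflite::MicroInterpreter together with the
  // model, the tensor arena and an error reporter.
}
```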
+ +TfLiteRegistration Register_BATCH_TO_SPACE_ND() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/cast.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/cast.cc new file mode 100644 index 00000000..b0462ed6 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/cast.cc @@ -0,0 +1,96 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + return kTfLiteOk; +} + +template +void copyCast(const FromT* in, ToT* out, int num_elements) { + std::transform(in, in + num_elements, out, + [](FromT a) { return static_cast(a); }); +} + +template +TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in, + TfLiteEvalTensor* out, int num_elements) { + switch (out->type) { + case kTfLiteInt8: + copyCast(in, out->data.int8, num_elements); + break; + case kTfLiteFloat32: + copyCast(in, tflite::micro::GetTensorData(out), num_elements); + break; + default: + // Unsupported type. + TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.", + TfLiteTypeGetName(out->type), out->type); + } + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + int num_elements = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + + switch (input->type) { + case kTfLiteInt8: + return copyToTensor(context, input->data.int8, output, num_elements); + case kTfLiteFloat32: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + default: + // Unsupported type. 
+ TF_LITE_KERNEL_LOG(context, "Input type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + } + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_CAST() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc index f7020306..d9c898b0 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#define FLATBUFFERS_LOCALE_INDEPENDENT 0 +#include "flatbuffers/flexbuffers.h" #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" @@ -55,7 +57,7 @@ constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; // TODO(b/149795762): Add this to TfLiteStatus enum. -constexpr int kTfLiteAbort = -9; +constexpr TfLiteStatus kTfLiteAbort = static_cast(-9); // These fields control the stride period of a strided streaming model. This op // returns kTfLiteAbort until cycles_until_run-- is zero. At this time, @@ -65,47 +67,64 @@ struct OpData { int cycles_max; }; -// These constants represent constants specific to the music detect model. -// They exist until (b/132070898) is fixed. -constexpr int kMaxOpDataSize = 7; -int op_data_counter = 0; -OpData op_data_array[kMaxOpDataSize]; - } // namespace -void Free(TfLiteContext* context, void* buffer) { op_data_counter = 0; } +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + OpData* op_data = static_cast( + context->AllocatePersistentBuffer(context, sizeof(OpData))); + + if (buffer != nullptr && length > 0) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); + op_data->cycles_max = m["cycles_max"].AsInt32(); + } else { + op_data->cycles_max = 0; + } + + return op_data; +} TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); + + TFLITE_DCHECK(node->user_data != nullptr); + OpData* op_data = static_cast(node->user_data); TF_LITE_ENSURE(context, input != nullptr); TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, 1, output->dims->data[0]); - TF_LITE_ENSURE_EQ(context, 1, input->dims->data[0]); + TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]); - TF_LITE_ENSURE_EQ(context, 1, output->dims->data[2]); - TF_LITE_ENSURE_EQ(context, 1, input->dims->data[2]); + TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]); TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - // The circular buffer custom operator currently only supports 
int8_t. + // The circular buffer custom operator currently only supports int8. TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - // TODO(b/132070898): Use statically slotted OpData structures until a - // scratch memory API is ready. - TFLITE_DCHECK_LE(op_data_counter, kMaxOpDataSize); - OpData* op_data = &op_data_array[op_data_counter++]; - // The last circular buffer layer (length 5) simply accumulates outputs, and - // does not run periodically. - // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. - if (output->dims->data[1] == 5) { - op_data->cycles_max = 1; - } else { - op_data->cycles_max = 2; + if (op_data->cycles_max <= 0) { + // The last circular buffer layer simply accumulates outputs, and does not + // run periodically. + // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. + static int cb_prepare_count = 0; + cb_prepare_count++; + // These checks specifically work for the only two streaming models + // supported on TFLM. They use the shape of the output tensor along with the + // layer number to determine if the circular buffer period should be 1 or 2. + + // These models are outlined int the following documents: + // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing + // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing + if (output->dims->data[1] == 5 || output->dims->data[1] == 13 || + (cb_prepare_count == 5 && output->dims->data[2] == 2 && + output->dims->data[3] == 96)) { + op_data->cycles_max = 1; + cb_prepare_count = 0; + } else { + op_data->cycles_max = 2; + } } op_data->cycles_until_run = op_data->cycles_max; node->user_data = op_data; @@ -127,10 +146,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TFLITE_DCHECK(node->user_data != nullptr); OpData* data = reinterpret_cast(node->user_data); int num_slots = output->dims->data[1]; - int depth = output->dims->data[3]; + int depth = output->dims->data[2] * output->dims->data[3]; if (input->type == kTfLiteInt8) { EvalInt8(tflite::micro::GetTensorData(input), num_slots, depth, @@ -148,12 +168,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return static_cast(kTfLiteAbort); } - // If prepare is ever called more than one time (for example, when testing the - // ambient model, the interpreter is created a few times), this op data - // counter needs to be reset so that future instances do not overrun this op - // data array. 
- op_data_counter = 0; - data->cycles_until_run = data->cycles_max; return kTfLiteOk; @@ -162,8 +176,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace circular_buffer TfLiteRegistration* Register_CIRCULAR_BUFFER() { - static TfLiteRegistration r = {/*init=*/nullptr, - /*free=*/circular_buffer::Free, + static TfLiteRegistration r = {/*init=*/circular_buffer::Init, + /*free=*/nullptr, /*prepare=*/circular_buffer::Prepare, /*invoke=*/circular_buffer::Eval, /*profiling_string=*/nullptr, diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h new file mode 100644 index 00000000..2fbf4fe9 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h @@ -0,0 +1,22 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H +#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H + +extern const int g_gen_data_size_circular_buffer_config; +extern const unsigned char g_gen_data_circular_buffer_config[]; + +#endif diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.cc index 55efa486..e9cbdf15 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.cc @@ -13,12 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/conv.h" +#include "tensorflow/lite/micro/kernels/conv.h" #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/conv.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" @@ -28,294 +29,60 @@ limitations under the License. namespace tflite { namespace { -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Conv is quantized along dimension 0: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kConvQuantizedDimension = 0; - -// This file has 2 implementation of Conv. - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. 
- int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; -}; - -inline PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - const TfLiteConvParams* params, int width, - int height, int filter_width, int filter_height, - int out_width, int out_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Matching GetWindowedOutputSize in TensorFlow. - auto padding = params->padding; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - params->dilation_height_factor, params->dilation_width_factor, height, - width, filter_height, filter_width, padding, &out_height, &out_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. 
- if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - int output_channels = filter->dims->data[kConvQuantizedDimension]; - - TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), - output_channels)); - } - return kTfLiteOk; -} - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = static_cast(node->builtin_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - - int input_width = input->dims->data[2]; - int input_height = input->dims->data[1]; - int filter_width = filter->dims->data[2]; - int filter_height = filter->dims->data[1]; - int output_width = output->dims->data[2]; - int output_height = output->dims->data[1]; - - // Dynimically allocate per-channel quantization parameters. - const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - // All per-channel quantized tensors need valid zero point and scale arrays. 
- if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - static_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - - TF_LITE_ENSURE(context, - affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData( - context, node, params, input_width, input_height, filter_width, - filter_height, output_width, output_height, input->type, data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - return kTfLiteOk; -} // namespace conv - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* im2col, TfLiteEvalTensor* hwcn_weights, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - // TODO(b/154032858): Investigate removing extra copies. - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col), nullptr); -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output, - TfLiteEvalTensor* im2col) { - // TODO(b/154032858): Investigate removing extra copies. 
- ConvParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::ConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* im2col, - TfLiteEvalTensor* hwcn_weights, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - // TODO(b/154032858): Investigate removing extra copies. - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col)); + return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kConvInputTensor); const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); const TfLiteEvalTensor* bias = (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + ? 
tflite::micro::GetEvalInput(context, node, kConvBiasTensor) : nullptr; TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); + const auto& data = *(static_cast(node->user_data)); TF_LITE_ENSURE_EQ(context, input->type, output->type); TF_LITE_ENSURE_MSG(context, input->type == filter->type, "Hybrid models are not supported on TFLite Micro."); switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); + case kTfLiteFloat32: { + tflite::reference_ops::Conv( + ConvParamsFloat(params, data), tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); break; - case kTfLiteInt8: - EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, - output, nullptr); - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); + } + case kTfLiteInt8: { + reference_integer_ops::ConvPerChannel( + ConvParamsQuantized(params, data), data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); break; + } default: TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", TfLiteTypeGetName(input->type), input->type); @@ -329,7 +96,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteRegistration Register_CONV_2D() { return {/*init=*/Init, /*free=*/nullptr, - /*prepare=*/Prepare, + /*prepare=*/ConvPrepare, /*invoke=*/Eval, /*profiling_string=*/nullptr, /*builtin_code=*/0, diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.h new file mode 100644 index 00000000..46bc7318 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv.h @@ -0,0 +1,77 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +struct OpDataConv { + TfLitePaddingValues padding; + + // Cached tensor zero point values for quantized operations. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; + + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + + // Per channel output multiplier and shift. + int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; + + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. + int32_t output_activation_min; + int32_t output_activation_max; +}; + +extern const int kConvInputTensor; +extern const int kConvWeightsTensor; +extern const int kConvBiasTensor; +extern const int kConvOutputTensor; +extern const int kConvQuantizedDimension; + +// Returns a ConvParams struct with all the parameters needed for a +// float computation. +ConvParams ConvParamsFloat(const TfLiteConvParams& params, + const OpDataConv& data); + +// Returns a ConvParams struct with all the parameters needed for a +// quantized computation. +ConvParams ConvParamsQuantized(const TfLiteConvParams& params, + const OpDataConv& data); + +TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, int width, + int height, int filter_width, + int filter_height, int out_width, + int out_height, const TfLiteType data_type, + OpDataConv* data); + +TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/conv_common.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv_common.cc new file mode 100644 index 00000000..a4a36ae1 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv_common.cc @@ -0,0 +1,182 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/conv.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/conv.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +const int kConvInputTensor = 0; +const int kConvWeightsTensor = 1; +const int kConvBiasTensor = 2; +const int kConvOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +const int kConvQuantizedDimension = 0; + +// Returns a ConvParams struct with all the parameters needed for a +// float computation. +ConvParams ConvParamsFloat(const TfLiteConvParams& params, + const OpDataConv& data) { + ConvParams op_params; + CalculateActivationRange(params.activation, &op_params.float_activation_min, + &op_params.float_activation_max); + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params.stride_width; + op_params.stride_height = params.stride_height; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.dilation_height_factor = params.dilation_height_factor; + return op_params; +} + +// Returns a ConvParams struct with all the parameters needed for a +// quantized computation. +ConvParams ConvParamsQuantized(const TfLiteConvParams& params, + const OpDataConv& data) { + ConvParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.weights_offset = -data.filter_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier = data.output_multiplier; + op_params.output_shift = -data.output_shift; + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.stride_height = params.stride_height; + op_params.stride_width = params.stride_width; + op_params.dilation_height_factor = params.dilation_height_factor; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + return op_params; +} + +TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, int width, + int height, int filter_width, + int filter_height, int out_width, + int out_height, const TfLiteType data_type, + OpDataConv* data) { + bool has_bias = node->inputs->size == 3; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. 
+ auto padding = params.padding; + data->padding = ComputePaddingHeightWidth( + params.stride_height, params.stride_width, params.dilation_height_factor, + params.dilation_width_factor, height, width, filter_height, filter_width, + padding, &out_height, &out_width); + + const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + const TfLiteTensor* bias = + GetOptionalInputTensor(context, node, kConvBiasTensor); + TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. + if (data_type != kTfLiteFloat32) { + int output_channels = filter->dims->data[kConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params.activation, + &data->output_multiplier, &data->output_shift, + &data->output_activation_min, &data->output_activation_max, + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + output_channels)); + } + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + return kTfLiteOk; +} + +TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpDataConv* data = static_cast(node->user_data); + const auto& params = + *(static_cast(node->builtin_data)); + + TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + const int input_width = input->dims->data[2]; + const int input_height = input->dims->data[1]; + const int filter_width = filter->dims->data[2]; + const int filter_height = filter->dims->data[1]; + const int output_width = output->dims->data[2]; + const int output_height = output->dims->data[1]; + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // All per-channel quantized tensors need valid zero point and scale arrays. 
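
`ConvPrepare` above sizes `per_channel_output_multiplier` / `per_channel_output_shift` by the number of output channels (dimension 0 of the filter, `kConvQuantizedDimension`). With int8 per-channel quantization each output channel carries its own filter scale, so the rescale applied to the accumulator differs per channel. A minimal sketch with made-up scale values (these would normally come from the tensors' quantization parameters):

```cpp
#include <cstdio>
#include <vector>

int main() {
  // Made-up scales; in TFLM they come from the tensors' quantization params
  // and are converted to fixed-point multipliers, one per output channel, by
  // PopulateConvolutionQuantizationParams.
  const float input_scale = 0.5f / 127.0f;
  const float output_scale = 1.0f / 127.0f;
  const std::vector<float> filter_scales = {0.010f, 0.020f, 0.015f};

  for (int c = 0; c < static_cast<int>(filter_scales.size()); ++c) {
    const double real_multiplier =
        static_cast<double>(input_scale) * filter_scales[c] / output_scale;
    std::printf("channel %d: effective rescale = %f\n", c, real_multiplier);
  }
  return 0;
}
```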
+ if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TFLITE_DCHECK(affine_quantization != nullptr); + TFLITE_DCHECK(affine_quantization->scale != nullptr); + TFLITE_DCHECK(affine_quantization->zero_point != nullptr); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, data)); + + return kTfLiteOk; +} +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/conv_test.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv_test.h new file mode 100644 index 00000000..a821a88f --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/conv_test.h @@ -0,0 +1,94 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/kernels/kernel_runner.h" +#include "tensorflow/lite/micro/kernels/micro_ops.h" +#include "tensorflow/lite/micro/test_helpers.h" +#include "tensorflow/lite/micro/testing/micro_test.h" + +namespace tflite { +namespace testing { + +TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, + int output_length, TfLiteConvParams* conv_params, + TfLiteRegistration registration, float* output_data); + +TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, + int output_length, TfLiteConvParams* conv_params, + TfLiteRegistration registration, int8_t* output_data); + +TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, + int output_length, TfLiteConvParams* conv_params, + TfLiteRegistration registration, uint8_t* output_data); + +TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, + const float* expected_output_data, + int output_length, + TfLiteConvParams* conv_params, + TfLiteRegistration registration, + float* output_data, float tolerance = 1e-5); + +TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, + const int8_t* expected_output_data, + int output_length, + TfLiteConvParams* conv_params, + TfLiteRegistration registration, + int8_t* output_data, float tolerance = 1e-5); + +TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, + const uint8_t* expected_output_data, + int output_length, + TfLiteConvParams* conv_params, + TfLiteRegistration registration, + uint8_t* output_data, float 
tolerance = 1e-5); + +TfLiteStatus TestConvFloat(const int* input_dims_data, const float* input_data, + const int* filter_dims_data, + const float* filter_data, const int* bias_dims_data, + const float* bias_data, const int* output_dims_data, + const float* expected_output_data, + TfLiteConvParams* conv_params, + TfLiteRegistration registration, float* output_data); + +TfLiteStatus TestConvQuantizedPerLayer( + const int* input_dims_data, const float* input_data, + uint8_t* input_quantized, float input_scale, const int* filter_dims_data, + const float* filter_data, uint8_t* filter_quantized, float filter_scale, + const int* bias_dims_data, const float* bias_data, int32_t* bias_quantized, + const int* output_dims_data, const float* expected_output_data, + uint8_t* expected_output_quantized, float output_scale, + TfLiteConvParams* conv_params, TfLiteRegistration registration, + uint8_t* output_data); + +TfLiteStatus TestConvQuantizedPerChannel( + const int* input_dims_data, const float* input_data, + int8_t* input_quantized, float input_scale, int input_zero_point, + const int* filter_dims_data, const float* filter_data, + int8_t* filter_data_quantized, const int* bias_dims_data, + const float* bias_data, int32_t* bias_data_quantized, float* bias_scales, + int* bias_zero_points, const int* output_dims_data, + const float* expected_output_data, int8_t* expected_output_data_quantized, + float output_scale, int output_zero_point, TfLiteConvParams* conv_params, + TfLiteRegistration registration, int8_t* output_data); + +} // namespace testing +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.cc index 85b51233..4f67158c 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "tensorflow/lite/micro/kernels/depthwise_conv.h" #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" @@ -21,6 +21,7 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" #include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" @@ -29,279 +30,58 @@ limitations under the License. namespace tflite { namespace { -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Depthwise conv is quantized along dimension 3: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kDepthwiseConvQuantizedDimension = 3; - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. 
- int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, int width, - int height, int filter_width, int filter_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - int unused_output_height, unused_output_width; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, 1, 1, height, width, - filter_height, filter_width, params->padding, &unused_output_height, - &unused_output_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. - if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - - return tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), num_channels); - } - return kTfLiteOk; -} - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = - reinterpret_cast(node->builtin_data); - OpData* data = static_cast(node->user_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - - const TfLiteType data_type = input->type; - int width = SizeOfDimension(input, 2); - int height = SizeOfDimension(input, 1); - int filter_width = SizeOfDimension(filter, 2); - int filter_height = SizeOfDimension(filter, 1); - - // Per channel quantization is only needed for int8_t inference. For other - // quantized types, only a single scale and zero point is needed. 
- const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - // Dynimically allocate per-channel quantization parameters. - data->per_channel_output_multiplier = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - // All per-channel quantized tensors need valid zero point and scale arrays. - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - reinterpret_cast( - filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - TF_LITE_ENSURE( - context, affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kDepthwiseConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, - filter_width, filter_height, data_type, - data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - return kTfLiteOk; -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. 
- op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - DepthwiseParams op_params; - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = 0; - op_params.output_offset = data.output_zero_point; - // TODO(b/130439627): Use calculated value for clamping. - op_params.quantized_activation_min = std::numeric_limits::min(); - op_params.quantized_activation_max = std::numeric_limits::max(); - - reference_integer_ops::DepthwiseConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. 
- op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data.output_shift; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = - reinterpret_cast(node->builtin_data); - const OpData& data = *(static_cast(node->user_data)); + auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpDataConv& data = *(static_cast(node->user_data)); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); const TfLiteEvalTensor* bias = (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) : nullptr; - // TODO(aselle): Consider whether float conv and quantized conv should be - // separate ops to avoid dispatch overhead here. switch (input->type) { // Already know in/out types are same. 
- case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, output); + case kTfLiteFloat32: { + tflite::reference_ops::DepthwiseConv( + DepthwiseConvParamsFloat(params, data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); break; - case kTfLiteInt8: - EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, - output); - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, data, input, filter, bias, output); + } + case kTfLiteInt8: { + reference_integer_ops::DepthwiseConvPerChannel( + DepthwiseConvParamsQuantized(params, data), + data.per_channel_output_multiplier, data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); break; + } default: TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", TfLiteTypeGetName(input->type), input->type); @@ -315,7 +95,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteRegistration Register_DEPTHWISE_CONV_2D() { return {/*init=*/Init, /*free=*/nullptr, - /*prepare=*/Prepare, + /*prepare=*/DepthwiseConvPrepare, /*invoke=*/Eval, /*profiling_string=*/nullptr, /*builtin_code=*/0, diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.h new file mode 100644 index 00000000..7a7eb0ba --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv.h @@ -0,0 +1,54 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/micro/kernels/conv.h" + +namespace tflite { + +extern const int kDepthwiseConvInputTensor; +extern const int kDepthwiseConvWeightsTensor; +extern const int kDepthwiseConvBiasTensor; +extern const int kDepthwiseConvOutputTensor; +extern const int kDepthwiseConvQuantizedDimension; + +// Returns a DepthwiseParams struct with all the parameters needed for a +// float computation. +DepthwiseParams DepthwiseConvParamsFloat( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data); + +// Returns a DepthwiseParams struct with all the parameters needed for a +// quantized computation. 
+DepthwiseParams DepthwiseConvParamsQuantized( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data); + +TfLiteStatus CalculateOpDataDepthwiseConv( + TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, int width, int height, + int filter_width, int filter_height, int out_width, int out_height, + const TfLiteType data_type, OpDataConv* data); + +TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv_common.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv_common.cc new file mode 100644 index 00000000..6e6693aa --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/depthwise_conv_common.cc @@ -0,0 +1,188 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/depthwise_conv.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +const int kDepthwiseConvInputTensor = 0; +const int kDepthwiseConvWeightsTensor = 1; +const int kDepthwiseConvBiasTensor = 2; +const int kDepthwiseConvOutputTensor = 0; + +// DepthwiseConv is quantized along dimension 3: +// https://www.tensorflow.org/lite/performance/quantization_spec +const int kDepthwiseConvQuantizedDimension = 3; + +// Returns a DepthwiseParams struct with all the parameters needed for a +// float computation. 
+DepthwiseParams DepthwiseConvParamsFloat( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data) { + DepthwiseParams op_params; + CalculateActivationRange(params.activation, &op_params.float_activation_min, + &op_params.float_activation_max); + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params.stride_width; + op_params.stride_height = params.stride_height; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.dilation_height_factor = params.dilation_height_factor; + op_params.depth_multiplier = params.depth_multiplier; + return op_params; +} + +// Returns a DepthwiseParams struct with all the parameters needed for a +// quantized computation. +DepthwiseParams DepthwiseConvParamsQuantized( + const TfLiteDepthwiseConvParams& params, const OpDataConv& data) { + DepthwiseParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.weights_offset = -data.filter_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier = data.output_multiplier; + op_params.output_shift = -data.output_shift; + op_params.padding_type = tflite::micro::RuntimePaddingType(params.padding); + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.stride_height = params.stride_height; + op_params.stride_width = params.stride_width; + op_params.dilation_height_factor = params.dilation_height_factor; + op_params.dilation_width_factor = params.dilation_width_factor; + op_params.depth_multiplier = params.depth_multiplier; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + return op_params; +} + +TfLiteStatus CalculateOpDataDepthwiseConv( + TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, int width, int height, + int filter_width, int filter_height, int out_width, int out_height, + const TfLiteType data_type, OpDataConv* data) { + bool has_bias = node->inputs->size == 3; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params.padding; + data->padding = ComputePaddingHeightWidth( + params.stride_height, params.stride_width, params.dilation_height_factor, + params.dilation_width_factor, height, width, filter_height, filter_width, + padding, &out_height, &out_width); + + const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + const TfLiteTensor* bias = + GetOptionalInputTensor(context, node, kConvBiasTensor); + TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
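
`CalculateOpDataDepthwiseConv`, like its conv counterpart, derives the padding from stride, dilation, filter and input sizes via `ComputePaddingHeightWidth`. For TensorFlow-style `SAME` padding the output keeps `ceil(input / stride)` elements and the total padding is split across the two sides, with the remainder on the bottom/right. A small standalone sketch of that arithmetic (helper names are ours, not the TFLM API):

```cpp
#include <algorithm>
#include <cstdio>

// Output size and leading padding for SAME padding, matching the arithmetic
// that ComputePaddingHeightWidth relies on.
int SamePaddingOutputSize(int in_size, int stride) {
  return (in_size + stride - 1) / stride;  // ceil(in / stride)
}

int SamePaddingBefore(int in_size, int stride, int filter_size, int dilation) {
  const int effective_filter = (filter_size - 1) * dilation + 1;
  const int out_size = SamePaddingOutputSize(in_size, stride);
  const int total_padding =
      std::max((out_size - 1) * stride + effective_filter - in_size, 0);
  return total_padding / 2;  // any odd remainder goes to the bottom/right side
}

int main() {
  // 32x32 input, 3x3 filter, stride 2, no dilation.
  const int in = 32, filter = 3, stride = 2, dilation = 1;
  std::printf("output size: %d, padding before: %d\n",
              SamePaddingOutputSize(in, stride),
              SamePaddingBefore(in, stride, filter, dilation));
  return 0;
}
```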
+ if (data_type != kTfLiteFloat32) { + int output_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params.activation, + &data->output_multiplier, &data->output_shift, + &data->output_activation_min, &data->output_activation_max, + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + output_channels)); + } + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + return kTfLiteOk; +} + +TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpDataConv* data = static_cast(node->user_data); + const auto& params = + *(static_cast(node->builtin_data)); + + TfLiteTensor* output = GetOutput(context, node, kDepthwiseConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + const TfLiteTensor* input = + GetInput(context, node, kDepthwiseConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* filter = + GetInput(context, node, kDepthwiseConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + const int input_width = input->dims->data[2]; + const int input_height = input->dims->data[1]; + const int filter_width = filter->dims->data[2]; + const int filter_height = filter->dims->data[1]; + const int output_width = output->dims->data[2]; + const int output_height = output->dims->data[1]; + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // All per-channel quantized tensors need valid zero point and scale arrays. 
+ if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TFLITE_DCHECK(affine_quantization != nullptr); + TFLITE_DCHECK(affine_quantization->scale != nullptr); + TFLITE_DCHECK(affine_quantization->zero_point != nullptr); + + TF_LITE_ENSURE( + context, affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kDepthwiseConvQuantizedDimension]); + + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, data)); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/dequantize.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/dequantize.cc index f4e2eb9f..b488c41a 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/dequantize.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/dequantize.cc @@ -59,8 +59,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 || input->type == kTfLiteInt16); - TF_LITE_ENSURE( - context, output->type == kTfLiteFloat32 || output->type == kTfLiteInt32); + TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); if (output->type == kTfLiteInt32) { const double effective_output_scale = @@ -112,32 +111,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTypeGetName(output->type)); return kTfLiteError; } - } else if (output->type == kTfLiteInt32) { - int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output)); - switch (input->type) { - case kTfLiteInt16: { - reference_ops::Requantize( - tflite::micro::GetTensorData(input), flat_size, - data->output_multiplier, data->output_shift, - data->quantization_params.zero_point, data->output_zero_point, - tflite::micro::GetTensorData(output)); - break; - } - case kTfLiteInt8: { - reference_ops::Requantize( - tflite::micro::GetTensorData(input), flat_size, - data->output_multiplier, data->output_shift, - data->quantization_params.zero_point, data->output_zero_point, - tflite::micro::GetTensorData(output)); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } } else { TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", TfLiteTypeGetName(input->type), diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/detection_postprocess.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/detection_postprocess.cc new file mode 100644 index 00000000..532a7e83 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/detection_postprocess.cc @@ -0,0 +1,805 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#define FLATBUFFERS_LOCALE_INDEPENDENT 0 +#include "flatbuffers/flexbuffers.h" +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +/** + * This version of detection_postprocess is specific to TFLite Micro. It + * contains the following differences between the TFLite version: + * + * 1.) Temporaries (temporary tensors) - Micro use instead scratch buffer API. + * 2.) Output dimensions - the TFLite version does not support undefined out + * dimensions. So model must have static out dimensions. + */ + +// Input tensors +constexpr int kInputTensorBoxEncodings = 0; +constexpr int kInputTensorClassPredictions = 1; +constexpr int kInputTensorAnchors = 2; + +// Output tensors +constexpr int kOutputTensorDetectionBoxes = 0; +constexpr int kOutputTensorDetectionClasses = 1; +constexpr int kOutputTensorDetectionScores = 2; +constexpr int kOutputTensorNumDetections = 3; + +constexpr int kNumCoordBox = 4; +constexpr int kBatchSize = 1; + +constexpr int kNumDetectionsPerClass = 100; + +// Object Detection model produces axis-aligned boxes in two formats: +// BoxCorner represents the lower left corner (xmin, ymin) and +// the upper right corner (xmax, ymax). +// CenterSize represents the center (xcenter, ycenter), height and width. +// BoxCornerEncoding and CenterSizeEncoding are related as follows: +// ycenter = y / y_scale * anchor.h + anchor.y; +// xcenter = x / x_scale * anchor.w + anchor.x; +// half_h = 0.5*exp(h/ h_scale)) * anchor.h; +// half_w = 0.5*exp(w / w_scale)) * anchor.w; +// ymin = ycenter - half_h +// ymax = ycenter + half_h +// xmin = xcenter - half_w +// xmax = xcenter + half_w +struct BoxCornerEncoding { + float ymin; + float xmin; + float ymax; + float xmax; +}; + +struct CenterSizeEncoding { + float y; + float x; + float h; + float w; +}; +// We make sure that the memory allocations are contiguous with static_assert. 
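
The comment above gives the equations the kernel uses to turn a CenterSize-encoded prediction plus its anchor into a corner-encoded box. The snippet below is a self-contained version of that math for a single box/anchor pair; the struct names mirror the ones above and the numeric values are made up:

```cpp
#include <cmath>
#include <cstdio>

struct CenterSize { float y, x, h, w; };
struct BoxCorner  { float ymin, xmin, ymax, xmax; };

// Decode one CenterSize prediction against its anchor, following the
// equations in the comment above (scale holds the y/x/h/w scale values).
BoxCorner Decode(const CenterSize& box, const CenterSize& anchor,
                 const CenterSize& scale) {
  const float ycenter = box.y / scale.y * anchor.h + anchor.y;
  const float xcenter = box.x / scale.x * anchor.w + anchor.x;
  const float half_h = 0.5f * std::exp(box.h / scale.h) * anchor.h;
  const float half_w = 0.5f * std::exp(box.w / scale.w) * anchor.w;
  return {ycenter - half_h, xcenter - half_w, ycenter + half_h, xcenter + half_w};
}

int main() {
  const CenterSize scale = {10.0f, 10.0f, 5.0f, 5.0f};   // typical SSD scales
  const CenterSize anchor = {0.5f, 0.5f, 0.2f, 0.2f};    // normalized anchor
  const CenterSize prediction = {1.0f, -1.0f, 0.5f, 0.5f};
  const BoxCorner b = Decode(prediction, anchor, scale);
  std::printf("ymin=%.3f xmin=%.3f ymax=%.3f xmax=%.3f\n",
              b.ymin, b.xmin, b.ymax, b.xmax);
  return 0;
}
```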
+static_assert(sizeof(BoxCornerEncoding) == sizeof(float) * kNumCoordBox, + "Size of BoxCornerEncoding is 4 float values"); +static_assert(sizeof(CenterSizeEncoding) == sizeof(float) * kNumCoordBox, + "Size of CenterSizeEncoding is 4 float values"); + +struct OpData { + int max_detections; + int max_classes_per_detection; // Fast Non-Max-Suppression + int detections_per_class; // Regular Non-Max-Suppression + float non_max_suppression_score_threshold; + float intersection_over_union_threshold; + int num_classes; + bool use_regular_non_max_suppression; + CenterSizeEncoding scale_values; + + // Scratch buffers indexes + int active_candidate_idx; + int decoded_boxes_idx; + int scores_idx; + int score_buffer_idx; + int keep_scores_idx; + int scores_after_regular_non_max_suppression_idx; + int sorted_values_idx; + int keep_indices_idx; + int sorted_indices_idx; + int buffer_idx; + int selected_idx; + + // Cached tensor scale and zero point values for quantized operations + TfLiteQuantizationParams input_box_encodings; + TfLiteQuantizationParams input_class_predictions; + TfLiteQuantizationParams input_anchors; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + OpData* op_data = nullptr; + + const uint8_t* buffer_t = reinterpret_cast(buffer); + const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); + + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + op_data = reinterpret_cast( + context->AllocatePersistentBuffer(context, sizeof(OpData))); + + op_data->max_detections = m["max_detections"].AsInt32(); + op_data->max_classes_per_detection = m["max_classes_per_detection"].AsInt32(); + if (m["detections_per_class"].IsNull()) + op_data->detections_per_class = kNumDetectionsPerClass; + else + op_data->detections_per_class = m["detections_per_class"].AsInt32(); + if (m["use_regular_nms"].IsNull()) + op_data->use_regular_non_max_suppression = false; + else + op_data->use_regular_non_max_suppression = m["use_regular_nms"].AsBool(); + + op_data->non_max_suppression_score_threshold = + m["nms_score_threshold"].AsFloat(); + op_data->intersection_over_union_threshold = m["nms_iou_threshold"].AsFloat(); + op_data->num_classes = m["num_classes"].AsInt32(); + op_data->scale_values.y = m["y_scale"].AsFloat(); + op_data->scale_values.x = m["x_scale"].AsFloat(); + op_data->scale_values.h = m["h_scale"].AsFloat(); + op_data->scale_values.w = m["w_scale"].AsFloat(); + + return op_data; +} + +void Free(TfLiteContext* context, void* buffer) {} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + auto* op_data = static_cast(node->user_data); + + // Inputs: box_encodings, scores, anchors + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + const TfLiteTensor* input_box_encodings = + GetInput(context, node, kInputTensorBoxEncodings); + const TfLiteTensor* input_class_predictions = + GetInput(context, node, kInputTensorClassPredictions); + const TfLiteTensor* input_anchors = + GetInput(context, node, kInputTensorAnchors); + TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3); + TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3); + TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2); + + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4); + const int num_boxes = input_box_encodings->dims->data[1]; + const int num_classes = op_data->num_classes; + + op_data->input_box_encodings.scale = input_box_encodings->params.scale; + op_data->input_box_encodings.zero_point = + input_box_encodings->params.zero_point; + 
op_data->input_class_predictions.scale = + input_class_predictions->params.scale; + op_data->input_class_predictions.zero_point = + input_class_predictions->params.zero_point; + op_data->input_anchors.scale = input_anchors->params.scale; + op_data->input_anchors.zero_point = input_anchors->params.zero_point; + + // Scratch tensors + context->RequestScratchBufferInArena(context, num_boxes, + &op_data->active_candidate_idx); + context->RequestScratchBufferInArena(context, + num_boxes * kNumCoordBox * sizeof(float), + &op_data->decoded_boxes_idx); + context->RequestScratchBufferInArena( + context, + input_class_predictions->dims->data[1] * + input_class_predictions->dims->data[2] * sizeof(float), + &op_data->scores_idx); + + // Additional buffers + context->RequestScratchBufferInArena(context, num_boxes * sizeof(float), + &op_data->score_buffer_idx); + context->RequestScratchBufferInArena(context, num_boxes * sizeof(float), + &op_data->keep_scores_idx); + context->RequestScratchBufferInArena( + context, op_data->max_detections * num_boxes * sizeof(float), + &op_data->scores_after_regular_non_max_suppression_idx); + context->RequestScratchBufferInArena( + context, op_data->max_detections * num_boxes * sizeof(float), + &op_data->sorted_values_idx); + context->RequestScratchBufferInArena(context, num_boxes * sizeof(int), + &op_data->keep_indices_idx); + context->RequestScratchBufferInArena( + context, op_data->max_detections * num_boxes * sizeof(int), + &op_data->sorted_indices_idx); + int buffer_size = std::max(num_classes, op_data->max_detections); + context->RequestScratchBufferInArena( + context, buffer_size * num_boxes * sizeof(int), &op_data->buffer_idx); + buffer_size = std::min(num_boxes, op_data->max_detections); + context->RequestScratchBufferInArena( + context, buffer_size * num_boxes * sizeof(int), &op_data->selected_idx); + + // Outputs: detection_boxes, detection_scores, detection_classes, + // num_detections + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4); + + return kTfLiteOk; +} + +class Dequantizer { + public: + Dequantizer(int zero_point, float scale) + : zero_point_(zero_point), scale_(scale) {} + float operator()(uint8_t x) { + return (static_cast(x) - zero_point_) * scale_; + } + + private: + int zero_point_; + float scale_; +}; + +void DequantizeBoxEncodings(const TfLiteEvalTensor* input_box_encodings, + int idx, float quant_zero_point, float quant_scale, + int length_box_encoding, + CenterSizeEncoding* box_centersize) { + const uint8_t* boxes = + tflite::micro::GetTensorData(input_box_encodings) + + length_box_encoding * idx; + Dequantizer dequantize(quant_zero_point, quant_scale); + // See definition of the KeyPointBoxCoder at + // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/keypoint_box_coder.py + // The first four elements are the box coordinates, which is the same as the + // FastRnnBoxCoder at + // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/faster_rcnn_box_coder.py + box_centersize->y = dequantize(boxes[0]); + box_centersize->x = dequantize(boxes[1]); + box_centersize->h = dequantize(boxes[2]); + box_centersize->w = dequantize(boxes[3]); +} + +template +T ReInterpretTensor(const TfLiteEvalTensor* tensor) { + const float* tensor_base = tflite::micro::GetTensorData(tensor); + return reinterpret_cast(tensor_base); +} + +template +T ReInterpretTensor(TfLiteEvalTensor* tensor) { + float* tensor_base = tflite::micro::GetTensorData(tensor); + return reinterpret_cast(tensor_base); +} + 
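
`DequantizeBoxEncodings` above relies on the small `Dequantizer` functor, which is the standard affine mapping `real = (q - zero_point) * scale`. A minimal worked example with made-up quantization parameters:

```cpp
#include <cstdint>
#include <cstdio>

// Affine dequantization, the same mapping the Dequantizer functor applies to
// uint8 box encodings and class scores.
float Dequantize(uint8_t q, int zero_point, float scale) {
  return (static_cast<int>(q) - zero_point) * scale;
}

int main() {
  const int zero_point = 128;   // made-up quantization parameters
  const float scale = 0.05f;
  const uint8_t raw[4] = {120, 128, 140, 200};
  for (uint8_t q : raw) {
    std::printf("q=%3d -> %.2f\n", q, Dequantize(q, zero_point, scale));
  }
  return 0;
}
```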
+TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, + OpData* op_data) { + // Parse input tensor boxencodings + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize); + const int num_boxes = input_box_encodings->dims->data[1]; + TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox); + const TfLiteEvalTensor* input_anchors = + tflite::micro::GetEvalInput(context, node, kInputTensorAnchors); + + // Decode the boxes to get (ymin, xmin, ymax, xmax) based on the anchors + CenterSizeEncoding box_centersize; + CenterSizeEncoding scale_values = op_data->scale_values; + CenterSizeEncoding anchor; + for (int idx = 0; idx < num_boxes; ++idx) { + switch (input_box_encodings->type) { + // Quantized + case kTfLiteUInt8: + DequantizeBoxEncodings( + input_box_encodings, idx, + static_cast(op_data->input_box_encodings.zero_point), + static_cast(op_data->input_box_encodings.scale), + input_box_encodings->dims->data[2], &box_centersize); + DequantizeBoxEncodings( + input_anchors, idx, + static_cast(op_data->input_anchors.zero_point), + static_cast(op_data->input_anchors.scale), kNumCoordBox, + &anchor); + break; + // Float + case kTfLiteFloat32: { + // Please see DequantizeBoxEncodings function for the support detail. + const int box_encoding_idx = idx * input_box_encodings->dims->data[2]; + const float* boxes = &(tflite::micro::GetTensorData( + input_box_encodings)[box_encoding_idx]); + box_centersize = *reinterpret_cast(boxes); + anchor = + ReInterpretTensor(input_anchors)[idx]; + break; + } + default: + // Unsupported type. + return kTfLiteError; + } + + float ycenter = static_cast(static_cast(box_centersize.y) / + static_cast(scale_values.y) * + static_cast(anchor.h) + + static_cast(anchor.y)); + + float xcenter = static_cast(static_cast(box_centersize.x) / + static_cast(scale_values.x) * + static_cast(anchor.w) + + static_cast(anchor.x)); + + float half_h = + static_cast(0.5 * + (std::exp(static_cast(box_centersize.h) / + static_cast(scale_values.h))) * + static_cast(anchor.h)); + float half_w = + static_cast(0.5 * + (std::exp(static_cast(box_centersize.w) / + static_cast(scale_values.w))) * + static_cast(anchor.w)); + + float* decoded_boxes = reinterpret_cast( + context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); + auto& box = reinterpret_cast(decoded_boxes)[idx]; + box.ymin = ycenter - half_h; + box.xmin = xcenter - half_w; + box.ymax = ycenter + half_h; + box.xmax = xcenter + half_w; + } + return kTfLiteOk; +} + +void DecreasingPartialArgSort(const float* values, int num_values, + int num_to_sort, int* indices) { + std::iota(indices, indices + num_values, 0); + std::partial_sort( + indices, indices + num_to_sort, indices + num_values, + [&values](const int i, const int j) { return values[i] > values[j]; }); +} + +int SelectDetectionsAboveScoreThreshold(const float* values, int size, + const float threshold, + float* keep_values, int* keep_indices) { + int counter = 0; + for (int i = 0; i < size; i++) { + if (values[i] >= threshold) { + keep_values[counter] = values[i]; + keep_indices[counter] = i; + counter++; + } + } + return counter; +} + +bool ValidateBoxes(const float* decoded_boxes, const int num_boxes) { + for (int i = 0; i < num_boxes; ++i) { + // ymax>=ymin, xmax>=xmin + auto& box = reinterpret_cast(decoded_boxes)[i]; + if (box.ymin >= box.ymax || box.xmin >= box.xmax) { + return false; + } + } 
+ return true; +} + +float ComputeIntersectionOverUnion(const float* decoded_boxes, const int i, + const int j) { + auto& box_i = reinterpret_cast(decoded_boxes)[i]; + auto& box_j = reinterpret_cast(decoded_boxes)[j]; + const float area_i = (box_i.ymax - box_i.ymin) * (box_i.xmax - box_i.xmin); + const float area_j = (box_j.ymax - box_j.ymin) * (box_j.xmax - box_j.xmin); + if (area_i <= 0 || area_j <= 0) return 0.0; + const float intersection_ymin = std::max(box_i.ymin, box_j.ymin); + const float intersection_xmin = std::max(box_i.xmin, box_j.xmin); + const float intersection_ymax = std::min(box_i.ymax, box_j.ymax); + const float intersection_xmax = std::min(box_i.xmax, box_j.xmax); + const float intersection_area = + std::max(intersection_ymax - intersection_ymin, 0.0) * + std::max(intersection_xmax - intersection_xmin, 0.0); + return intersection_area / (area_i + area_j - intersection_area); +} + +// NonMaxSuppressionSingleClass() prunes out the box locations with high overlap +// before selecting the highest scoring boxes (max_detections in number) +// It assumes all boxes are good in beginning and sorts based on the scores. +// If lower-scoring box has too much overlap with a higher-scoring box, +// we get rid of the lower-scoring box. +// Complexity is O(N^2) pairwise comparison between boxes +TfLiteStatus NonMaxSuppressionSingleClassHelper( + TfLiteContext* context, TfLiteNode* node, OpData* op_data, + const float* scores, int* selected, int* selected_size, + int max_detections) { + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const int num_boxes = input_box_encodings->dims->data[1]; + const float non_max_suppression_score_threshold = + op_data->non_max_suppression_score_threshold; + const float intersection_over_union_threshold = + op_data->intersection_over_union_threshold; + // Maximum detections should be positive. + TF_LITE_ENSURE(context, (max_detections >= 0)); + // intersection_over_union_threshold should be positive + // and should be less than 1. 
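
`ComputeIntersectionOverUnion` above is the standard IoU: intersection area divided by union area. For reference, here is the same calculation for two concrete boxes (our own `Box` struct stands in for the flattened `BoxCornerEncoding` layout used above):

```cpp
#include <algorithm>
#include <cstdio>

struct Box { float ymin, xmin, ymax, xmax; };

// Intersection-over-union of two axis-aligned boxes, mirroring
// ComputeIntersectionOverUnion above.
float IoU(const Box& a, const Box& b) {
  const float area_a = (a.ymax - a.ymin) * (a.xmax - a.xmin);
  const float area_b = (b.ymax - b.ymin) * (b.xmax - b.xmin);
  if (area_a <= 0.0f || area_b <= 0.0f) return 0.0f;
  const float iy = std::max(0.0f, std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin));
  const float ix = std::max(0.0f, std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin));
  const float inter = iy * ix;
  return inter / (area_a + area_b - inter);
}

int main() {
  const Box a = {0.0f, 0.0f, 1.0f, 1.0f};
  const Box b = {0.5f, 0.5f, 1.5f, 1.5f};
  // Overlap is 0.5 * 0.5 = 0.25; union is 1 + 1 - 0.25 = 1.75; IoU ~= 0.143.
  std::printf("IoU = %.3f\n", IoU(a, b));
  return 0;
}
```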
+ TF_LITE_ENSURE(context, (intersection_over_union_threshold > 0.0f) && + (intersection_over_union_threshold <= 1.0f)); + // Validate boxes + float* decoded_boxes = reinterpret_cast( + context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); + + TF_LITE_ENSURE(context, ValidateBoxes(decoded_boxes, num_boxes)); + + // threshold scores + int* keep_indices = reinterpret_cast( + context->GetScratchBuffer(context, op_data->keep_indices_idx)); + float* keep_scores = reinterpret_cast( + context->GetScratchBuffer(context, op_data->keep_scores_idx)); + int num_scores_kept = SelectDetectionsAboveScoreThreshold( + scores, num_boxes, non_max_suppression_score_threshold, keep_scores, + keep_indices); + int* sorted_indices = reinterpret_cast( + context->GetScratchBuffer(context, op_data->sorted_indices_idx)); + + DecreasingPartialArgSort(keep_scores, num_scores_kept, num_scores_kept, + sorted_indices); + + const int num_boxes_kept = num_scores_kept; + const int output_size = std::min(num_boxes_kept, max_detections); + *selected_size = 0; + + int num_active_candidate = num_boxes_kept; + uint8_t* active_box_candidate = reinterpret_cast( + context->GetScratchBuffer(context, op_data->active_candidate_idx)); + + for (int row = 0; row < num_boxes_kept; row++) { + active_box_candidate[row] = 1; + } + for (int i = 0; i < num_boxes_kept; ++i) { + if (num_active_candidate == 0 || *selected_size >= output_size) break; + if (active_box_candidate[i] == 1) { + selected[(*selected_size)++] = keep_indices[sorted_indices[i]]; + active_box_candidate[i] = 0; + num_active_candidate--; + } else { + continue; + } + for (int j = i + 1; j < num_boxes_kept; ++j) { + if (active_box_candidate[j] == 1) { + float intersection_over_union = ComputeIntersectionOverUnion( + decoded_boxes, keep_indices[sorted_indices[i]], + keep_indices[sorted_indices[j]]); + + if (intersection_over_union > intersection_over_union_threshold) { + active_box_candidate[j] = 0; + num_active_candidate--; + } + } + } + } + + return kTfLiteOk; +} + +// This function implements a regular version of Non Maximal Suppression (NMS) +// for multiple classes where +// 1) we do NMS separately for each class across all anchors and +// 2) keep only the highest anchor scores across all classes +// 3) The worst runtime of the regular NMS is O(K*N^2) +// where N is the number of anchors and K the number of +// classes. 
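
`NonMaxSuppressionSingleClassHelper` above is the classic greedy NMS: rank candidates by score, repeatedly keep the best remaining box and suppress every box whose IoU with it exceeds the threshold. Below is a compact, self-contained sketch of the same idea, stripped of the scratch-buffer plumbing; all names and values are ours, not the kernel's API:

```cpp
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

struct Box { float ymin, xmin, ymax, xmax; };

float IoU(const Box& a, const Box& b) {
  const float area_a = (a.ymax - a.ymin) * (a.xmax - a.xmin);
  const float area_b = (b.ymax - b.ymin) * (b.xmax - b.xmin);
  if (area_a <= 0.0f || area_b <= 0.0f) return 0.0f;
  const float iy = std::max(0.0f, std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin));
  const float ix = std::max(0.0f, std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin));
  const float inter = iy * ix;
  return inter / (area_a + area_b - inter);
}

// Greedy single-class NMS: keep the highest-scoring box, suppress everything
// that overlaps it too much, repeat until max_detections boxes are kept.
std::vector<int> GreedyNms(const std::vector<Box>& boxes,
                           const std::vector<float>& scores,
                           float score_threshold, float iou_threshold,
                           int max_detections) {
  std::vector<int> order(boxes.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(),
            [&](int i, int j) { return scores[i] > scores[j]; });

  std::vector<int> kept;
  std::vector<bool> suppressed(boxes.size(), false);
  for (int i : order) {
    if (static_cast<int>(kept.size()) >= max_detections) break;
    if (suppressed[i] || scores[i] < score_threshold) continue;
    kept.push_back(i);
    for (int j : order) {
      if (j != i && !suppressed[j] && IoU(boxes[i], boxes[j]) > iou_threshold) {
        suppressed[j] = true;
      }
    }
  }
  return kept;
}

int main() {
  const std::vector<Box> boxes = {{0, 0, 1, 1}, {0.1f, 0.1f, 1.1f, 1.1f}, {2, 2, 3, 3}};
  const std::vector<float> scores = {0.9f, 0.8f, 0.7f};
  for (int idx : GreedyNms(boxes, scores, 0.5f, 0.5f, 10)) {
    std::printf("kept box %d (score %.2f)\n", idx, scores[idx]);
  }
  return 0;
}
```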
+TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context, + TfLiteNode* node, + OpData* op_data, + const float* scores) { + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_class_predictions = + tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); + TfLiteEvalTensor* detection_boxes = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes); + TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput( + context, node, kOutputTensorDetectionClasses); + TfLiteEvalTensor* detection_scores = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores); + TfLiteEvalTensor* num_detections = + tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections); + + const int num_boxes = input_box_encodings->dims->data[1]; + const int num_classes = op_data->num_classes; + const int num_detections_per_class = op_data->detections_per_class; + const int max_detections = op_data->max_detections; + const int num_classes_with_background = + input_class_predictions->dims->data[2]; + // The row index offset is 1 if background class is included and 0 otherwise. + int label_offset = num_classes_with_background - num_classes; + TF_LITE_ENSURE(context, num_detections_per_class > 0); + + // For each class, perform non-max suppression. + float* class_scores = reinterpret_cast( + context->GetScratchBuffer(context, op_data->score_buffer_idx)); + int* box_indices_after_regular_non_max_suppression = reinterpret_cast( + context->GetScratchBuffer(context, op_data->buffer_idx)); + float* scores_after_regular_non_max_suppression = + reinterpret_cast(context->GetScratchBuffer( + context, op_data->scores_after_regular_non_max_suppression_idx)); + + int size_of_sorted_indices = 0; + int* sorted_indices = reinterpret_cast( + context->GetScratchBuffer(context, op_data->sorted_indices_idx)); + float* sorted_values = reinterpret_cast( + context->GetScratchBuffer(context, op_data->sorted_values_idx)); + + for (int col = 0; col < num_classes; col++) { + for (int row = 0; row < num_boxes; row++) { + // Get scores of boxes corresponding to all anchors for single class + class_scores[row] = + *(scores + row * num_classes_with_background + col + label_offset); + } + // Perform non-maximal suppression on single class + int selected_size = 0; + int* selected = reinterpret_cast( + context->GetScratchBuffer(context, op_data->selected_idx)); + TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper( + context, node, op_data, class_scores, selected, &selected_size, + num_detections_per_class)); + // Add selected indices from non-max suppression of boxes in this class + int output_index = size_of_sorted_indices; + for (int i = 0; i < selected_size; i++) { + int selected_index = selected[i]; + + box_indices_after_regular_non_max_suppression[output_index] = + (selected_index * num_classes_with_background + col + label_offset); + scores_after_regular_non_max_suppression[output_index] = + class_scores[selected_index]; + output_index++; + } + // Sort the max scores among the selected indices + // Get the indices for top scores + int num_indices_to_sort = std::min(output_index, max_detections); + DecreasingPartialArgSort(scores_after_regular_non_max_suppression, + output_index, num_indices_to_sort, sorted_indices); + + // Copy values to temporary vectors + for (int row = 0; row < num_indices_to_sort; row++) { + int temp = sorted_indices[row]; + 
sorted_indices[row] = box_indices_after_regular_non_max_suppression[temp]; + sorted_values[row] = scores_after_regular_non_max_suppression[temp]; + } + // Copy scores and indices from temporary vectors + for (int row = 0; row < num_indices_to_sort; row++) { + box_indices_after_regular_non_max_suppression[row] = sorted_indices[row]; + scores_after_regular_non_max_suppression[row] = sorted_values[row]; + } + size_of_sorted_indices = num_indices_to_sort; + } + + // Allocate output tensors + for (int output_box_index = 0; output_box_index < max_detections; + output_box_index++) { + if (output_box_index < size_of_sorted_indices) { + const int anchor_index = floor( + box_indices_after_regular_non_max_suppression[output_box_index] / + num_classes_with_background); + const int class_index = + box_indices_after_regular_non_max_suppression[output_box_index] - + anchor_index * num_classes_with_background - label_offset; + const float selected_score = + scores_after_regular_non_max_suppression[output_box_index]; + // detection_boxes + float* decoded_boxes = reinterpret_cast( + context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); + ReInterpretTensor(detection_boxes)[output_box_index] = + reinterpret_cast(decoded_boxes)[anchor_index]; + // detection_classes + tflite::micro::GetTensorData(detection_classes)[output_box_index] = + class_index; + // detection_scores + tflite::micro::GetTensorData(detection_scores)[output_box_index] = + selected_score; + } else { + ReInterpretTensor( + detection_boxes)[output_box_index] = {0.0f, 0.0f, 0.0f, 0.0f}; + // detection_classes + tflite::micro::GetTensorData(detection_classes)[output_box_index] = + 0.0f; + // detection_scores + tflite::micro::GetTensorData(detection_scores)[output_box_index] = + 0.0f; + } + } + tflite::micro::GetTensorData(num_detections)[0] = + size_of_sorted_indices; + + return kTfLiteOk; +} + +// This function implements a fast version of Non Maximal Suppression for +// multiple classes where +// 1) we keep the top-k scores for each anchor and +// 2) during NMS, each anchor only uses the highest class score for sorting. +// 3) Compared to standard NMS, the worst runtime of this version is O(N^2) +// instead of O(KN^2) where N is the number of anchors and K the number of +// classes. +TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context, + TfLiteNode* node, + OpData* op_data, + const float* scores) { + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_class_predictions = + tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); + TfLiteEvalTensor* detection_boxes = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes); + + TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput( + context, node, kOutputTensorDetectionClasses); + TfLiteEvalTensor* detection_scores = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores); + TfLiteEvalTensor* num_detections = + tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections); + + const int num_boxes = input_box_encodings->dims->data[1]; + const int num_classes = op_data->num_classes; + const int max_categories_per_anchor = op_data->max_classes_per_detection; + const int num_classes_with_background = + input_class_predictions->dims->data[2]; + + // The row index offset is 1 if background class is included and 0 otherwise. 
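// ---------------------------------------------------------------------------
// Worked example added for clarity (not part of the upstream file): the class
// score tensor is laid out as [1, num_boxes, num_classes_with_background].
// With num_classes = 3 and an implicit background class,
// num_classes_with_background = 4 and label_offset = 1, so the score of real
// class c for box b sits at b * num_classes_with_background + c + label_offset;
// e.g. class 2 of box 5 is read at index 5 * 4 + 2 + 1 = 23.
// ---------------------------------------------------------------------------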
+ int label_offset = num_classes_with_background - num_classes; + TF_LITE_ENSURE(context, (max_categories_per_anchor > 0)); + const int num_categories_per_anchor = + std::min(max_categories_per_anchor, num_classes); + float* max_scores = reinterpret_cast( + context->GetScratchBuffer(context, op_data->score_buffer_idx)); + int* sorted_class_indices = reinterpret_cast( + context->GetScratchBuffer(context, op_data->buffer_idx)); + + for (int row = 0; row < num_boxes; row++) { + const float* box_scores = + scores + row * num_classes_with_background + label_offset; + int* class_indices = sorted_class_indices + row * num_classes; + DecreasingPartialArgSort(box_scores, num_classes, num_categories_per_anchor, + class_indices); + max_scores[row] = box_scores[class_indices[0]]; + } + + // Perform non-maximal suppression on max scores + int selected_size = 0; + int* selected = reinterpret_cast( + context->GetScratchBuffer(context, op_data->selected_idx)); + TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper( + context, node, op_data, max_scores, selected, &selected_size, + op_data->max_detections)); + + // Allocate output tensors + int output_box_index = 0; + + for (int i = 0; i < selected_size; i++) { + int selected_index = selected[i]; + + const float* box_scores = + scores + selected_index * num_classes_with_background + label_offset; + const int* class_indices = + sorted_class_indices + selected_index * num_classes; + + for (int col = 0; col < num_categories_per_anchor; ++col) { + int box_offset = num_categories_per_anchor * output_box_index + col; + + // detection_boxes + float* decoded_boxes = reinterpret_cast( + context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); + ReInterpretTensor(detection_boxes)[box_offset] = + reinterpret_cast(decoded_boxes)[selected_index]; + + // detection_classes + tflite::micro::GetTensorData(detection_classes)[box_offset] = + class_indices[col]; + + // detection_scores + tflite::micro::GetTensorData(detection_scores)[box_offset] = + box_scores[class_indices[col]]; + + output_box_index++; + } + } + + tflite::micro::GetTensorData(num_detections)[0] = output_box_index; + return kTfLiteOk; +} + +void DequantizeClassPredictions(const TfLiteEvalTensor* input_class_predictions, + const int num_boxes, + const int num_classes_with_background, + float* scores, OpData* op_data) { + float quant_zero_point = + static_cast(op_data->input_class_predictions.zero_point); + float quant_scale = + static_cast(op_data->input_class_predictions.scale); + Dequantizer dequantize(quant_zero_point, quant_scale); + const uint8_t* scores_quant = + tflite::micro::GetTensorData(input_class_predictions); + for (int idx = 0; idx < num_boxes * num_classes_with_background; ++idx) { + scores[idx] = dequantize(scores_quant[idx]); + } +} + +TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, + TfLiteNode* node, OpData* op_data) { + // Get the input tensors + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_class_predictions = + tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); + const int num_boxes = input_box_encodings->dims->data[1]; + const int num_classes = op_data->num_classes; + + TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0], + kBatchSize); + TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes); + const int num_classes_with_background = + input_class_predictions->dims->data[2]; + + 
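// ---------------------------------------------------------------------------
// Worked example added for clarity (not part of the upstream file): in the
// uint8 path below, DequantizeClassPredictions() maps each quantized score q
// to  scale * (q - zero_point).  With scale = 1/255 and zero_point = 0
// (assumed values for the example), q = 128 becomes roughly 0.502.
// ---------------------------------------------------------------------------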
TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1));
+  TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes));
+
+  const float* scores;
+  switch (input_class_predictions->type) {
+    case kTfLiteUInt8: {
+      float* temporary_scores = reinterpret_cast<float*>(
+          context->GetScratchBuffer(context, op_data->scores_idx));
+      DequantizeClassPredictions(input_class_predictions, num_boxes,
+                                 num_classes_with_background, temporary_scores,
+                                 op_data);
+      scores = temporary_scores;
+    } break;
+    case kTfLiteFloat32:
+      scores = tflite::micro::GetTensorData<float>(input_class_predictions);
+      break;
+    default:
+      // Unsupported type.
+      return kTfLiteError;
+  }
+
+  if (op_data->use_regular_non_max_suppression) {
+    TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper(
+        context, node, op_data, scores));
+  } else {
+    TF_LITE_ENSURE_STATUS(
+        NonMaxSuppressionMultiClassFastHelper(context, node, op_data, scores));
+  }
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  TF_LITE_ENSURE(context, (kBatchSize == 1));
+  auto* op_data = static_cast<OpData*>(node->user_data);
+
+  // These two functions correspond to two blocks in the Object Detection model.
+  // In future, we would like to break the custom op in two blocks, which is
+  // currently not feasible because we would like to input quantized inputs
+  // and do all calculations in float. Mixed quantized/float calculations are
+  // currently not supported in TFLite.
+
+  // This fills in temporary decoded_boxes
+  // by transforming input_box_encodings and input_anchors from
+  // CenterSizeEncodings to BoxCornerEncoding
+  TF_LITE_ENSURE_STATUS(DecodeCenterSizeBoxes(context, node, op_data));
+
+  // This fills in the output tensors
+  // by choosing effective set of decoded boxes
+  // based on Non Maximal Suppression, i.e. selecting
+  // highest scoring non-overlapping boxes.
+  TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClass(context, node, op_data));
+
+  return kTfLiteOk;
+}
+} // namespace
+
+TfLiteRegistration* Register_DETECTION_POSTPROCESS() {
+  static TfLiteRegistration r = {/*init=*/Init,
+                                 /*free=*/Free,
+                                 /*prepare=*/Prepare,
+                                 /*invoke=*/Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
+  return &r;
+}
+
+} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h
new file mode 100644
index 00000000..f5b9eae0
--- /dev/null
+++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/detection_postprocess_flexbuffers_generated_data.h
@@ -0,0 +1,25 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H +#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H + +extern const int g_gen_data_size_none_regular_nms; +extern const unsigned char g_gen_data_none_regular_nms[]; + +extern const int g_gen_data_size_regular_nms; +extern const unsigned char g_gen_data_regular_nms[]; + +#endif diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/div.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/div.cc new file mode 100644 index 00000000..7d7783bf --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/div.cc @@ -0,0 +1,206 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/div.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + // Parameters used in the quantized paths where the output is 8bit + int32_t input1_zero_point; + int32_t input2_zero_point; + int32_t output_zero_point; + int32_t output_activation_min; + int32_t output_activation_max; + + // Parameters used in all quantized paths + int32_t output_multiplier; + int output_shift; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + TfLiteDivParams* params, OpData* data) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const TfLiteTensor* input1; + TF_LITE_ENSURE_OK(context, + GetInputSafe(context, node, kInputTensor1, &input1)); + const TfLiteTensor* input2; + TF_LITE_ENSURE_OK(context, + GetInputSafe(context, node, kInputTensor2, &input2)); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); + + if (output->type == kTfLiteInt8) { + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + const double real_multiplier = static_cast( + input1->params.scale / (input2->params.scale * output->params.scale)); + QuantizeMultiplier(real_multiplier, &data->output_multiplier, + &data->output_shift); + data->input1_zero_point = input1->params.zero_point; + data->input2_zero_point = input2->params.zero_point; + data->output_zero_point = 
output->params.zero_point; + } + + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + auto* params = static_cast(node->builtin_data); + auto* data = static_cast(node->user_data); + return CalculateOpData(context, node, params, data); +} + +void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params, + const OpData* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + +#define TF_LITE_DIV(type, opname, data_type) \ + data_type output_activation_min, output_activation_max; \ + CalculateActivationRange(params->activation, &output_activation_min, \ + &output_activation_max); \ + SetActivationParams(output_activation_min, output_activation_max, \ + &op_params); \ + type::opname(op_params, tflite::micro::GetTensorShape(input1), \ + tflite::micro::GetTensorData(input1), \ + tflite::micro::GetTensorShape(input2), \ + tflite::micro::GetTensorData(input2), \ + tflite::micro::GetTensorShape(output), \ + tflite::micro::GetTensorData(output)) + + bool requires_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (requires_broadcast) { + TF_LITE_DIV(reference_ops, BroadcastDivSlow, float); + } else { + TF_LITE_DIV(reference_ops, Div, float); + } +#undef TF_LITE_DIV +} + +TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteDivParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + +#define TF_LITE_DIV(type, opname, dtype) \ + type::opname(op_params, tflite::micro::GetTensorShape(input1), \ + tflite::micro::GetTensorData(input1), \ + tflite::micro::GetTensorShape(input2), \ + tflite::micro::GetTensorData(input2), \ + tflite::micro::GetTensorShape(output), \ + tflite::micro::GetTensorData(output)) + + if (input1->type == kTfLiteInt8 && input2->type == kTfLiteInt8 && + output->type == kTfLiteInt8) { + SetActivationParams(data->output_activation_min, + data->output_activation_max, &op_params); + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = -data->input2_zero_point; + op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool requires_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (requires_broadcast) { + TF_LITE_DIV(reference_ops, BroadcastDivSlow, int8_t); + } else { + TF_LITE_DIV(reference_ops, Div, int8_t); + } +#undef TF_LITE_DIV + } else { + TF_LITE_KERNEL_LOG( + context, "Unsupported combination of input and output types in DIV."); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = static_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + auto* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + 
tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + EvalDiv(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8) { + TF_LITE_ENSURE_OK(context, EvalQuantized(context, node, params, data, + input1, input2, output)); + } else { + TF_LITE_KERNEL_LOG(context, + "DIV only supports FLOAT32, quantized INT8 " + "now, got type %s (%d).", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_DIV() { + return {/*init=*/Init, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/elu.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/elu.cc new file mode 100644 index 00000000..a3b81071 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/elu.cc @@ -0,0 +1,151 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/elu.h" + +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/cppmath.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// OLD-TODO(b/142762739): We should figure out a multi-threading plan for most +// of the activation ops below. + +struct OpData { + int8_t table[256]; +}; + +using TransformFunc = float (*)(float); + +template +void PopulateLookupTable(const TfLiteTensor* input, const TfLiteTensor* output, + const TransformFunc transform, OpData* data) { + if (sizeof(T) != 1) TF_LITE_FATAL("Lookup table valid only for 8bit"); + + const float inverse_scale = 1 / output->params.scale; + int32_t maxval = std::numeric_limits::max(); + int32_t minval = std::numeric_limits::min(); + for (int32_t val = minval; val <= maxval; ++val) { + const float dequantized = + input->params.scale * (val - input->params.zero_point); + const float transformed = transform(dequantized); + const float rescaled = TfLiteRound(transformed * inverse_scale); + const int32_t quantized = + static_cast(rescaled + output->params.zero_point); + data->table[static_cast(static_cast(val))] = + static_cast(std::max(std::min(maxval, quantized), minval)); + } +} + +// OLD-TODO(b/143696793): move this to optimized_ops. 
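// ---------------------------------------------------------------------------
// Illustrative sketch added for clarity; it is NOT part of the upstream
// TensorFlow Lite Micro sources. It shows the lookup-table idea behind
// PopulateLookupTable() and the int8 ELU path on plain arrays: all 256
// possible int8 inputs are dequantized, transformed and requantized once,
// after which evaluation is a single table lookup per element. The QuantParams
// values are assumptions of the sketch, not values taken from a real model.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstdint>

namespace elu_lut_sketch {

struct QuantParams {
  float scale;
  int zero_point;
};

// Build a 256-entry int8 -> int8 table for ELU(x) = x (x >= 0), e^x - 1 (x < 0).
inline void BuildEluTable(QuantParams in, QuantParams out, int8_t table[256]) {
  for (int v = -128; v <= 127; ++v) {
    const float x = in.scale * (v - in.zero_point);   // dequantize
    const float y = x < 0.0f ? std::expm1(x) : x;     // ELU in float
    const int q =
        static_cast<int>(std::lround(y / out.scale)) + out.zero_point;
    table[static_cast<uint8_t>(static_cast<int8_t>(v))] =
        static_cast<int8_t>(std::min(127, std::max(-128, q)));  // requantize
  }
}

// Applying the table element-wise is the whole quantized evaluation step.
inline void ApplyTable(const int8_t* input, int8_t* output, int size,
                       const int8_t table[256]) {
  for (int i = 0; i < size; ++i) {
    output[i] = table[static_cast<uint8_t>(input[i])];
  }
}

}  // namespace elu_lut_sketch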
+void EvalUsingLookupTable(const OpData* data, const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + const int size = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + int8_t* output_data = tflite::micro::GetTensorData(output); + const int8_t* input_data = tflite::micro::GetTensorData(input); + + for (int i = 0; i < size; ++i) { + output_data[i] = data->table[static_cast(input_data[i])]; + } +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + // Use LUT to handle quantized elu path. + if (input->type == kTfLiteInt8) { + OpData* data = static_cast(node->user_data); + TransformFunc transform = [](float value) { + return value < 0.0f ? std::exp(value) - 1.0f : value; + }; + PopulateLookupTable(input, output, transform, data); + } + + return kTfLiteOk; +} + +void* EluInit(TfLiteContext* context, const char* buffer, size_t length) { + // This is a builtin op, so we don't use the contents in 'buffer', if any. + // Instead, we allocate a new object to carry information from Prepare() to + // Eval(). + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + switch (input->type) { + case kTfLiteFloat32: { + reference_ops::Elu(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { + const OpData* data = static_cast(node->user_data); + EvalUsingLookupTable(data, input, output); + return kTfLiteOk; + } + default: + TF_LITE_KERNEL_LOG( + context, "ELU only supports float32 and int8 currently, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } +} + +} // namespace + +TfLiteRegistration Register_ELU() { + return {/*init=*/EluInit, + /*free=*/nullptr, + /*prepare=*/EluPrepare, + /*invoke=*/EluEval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc index eac6cea8..c305121e 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.cc @@ -19,14 +19,9 @@ limitations under the License. 
#include "tensorflow/lite/c/common.h" namespace tflite { -namespace ops { -namespace micro { -namespace custom { + TfLiteRegistration* Register_ETHOSU() { return nullptr; } const char* GetString_ETHOSU() { return ""; } -} // namespace custom -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.h new file mode 100644 index 00000000..cfbb0d3f --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/ethosu.h @@ -0,0 +1,28 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_ + +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +TfLiteRegistration* Register_ETHOSU(); + +const char* GetString_ETHOSU(); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/exp.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/exp.cc new file mode 100644 index 00000000..253769a3 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/exp.cc @@ -0,0 +1,78 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/exp.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); + TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); + TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); + for (int i = 0; i < output->dims->size; ++i) { + TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); + } + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + + if (input->type == kTfLiteFloat32) { + reference_ops::Exp(tflite::micro::GetTensorData(input), + static_cast(flat_size), + tflite::micro::GetTensorData(output)); + } else { + TF_LITE_KERNEL_LOG(context, "Type %s (%d) currently not supported by Exp.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_EXP() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/expand_dims.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/expand_dims.cc new file mode 100644 index 00000000..1f105212 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/expand_dims.cc @@ -0,0 +1,152 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kAxisTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus ExpandTensorDim(TfLiteContext* context, + const TfLiteEvalTensor* input, int32_t axis, + TfLiteEvalTensor* output) { + const TfLiteIntArray* input_dims = input->dims; + TfLiteIntArray* output_dims = output->dims; + if (axis < 0) { + axis = input_dims->size + 1 + axis; + } + TF_LITE_ENSURE(context, (axis <= input_dims->size)); + + output_dims->size = input_dims->size + 1; + for (int i = 0; i < output_dims->size; ++i) { + if (i < axis) { + output_dims->data[i] = input_dims->data[i]; + } else if (i == axis) { + output_dims->data[i] = 1; + } else { + output_dims->data[i] = input_dims->data[i - 1]; + } + } + return kTfLiteOk; +} + +TfLiteStatus GetAxisValueFromTensor(TfLiteContext* context, + const TfLiteEvalTensor* axis, + int32_t* axis_value) { + const int axis_dims = (tflite::micro::GetTensorShape(axis)).DimensionsCount(); + if (axis_dims > 1) { + TF_LITE_KERNEL_LOG(context, "Axis has only one element for Expand_Dims.", + axis_dims); + return kTfLiteError; + } + + if (kTfLiteInt32 == (axis->type)) { + const int32_t* axis_ptr = tflite::micro::GetTensorData(axis); + *axis_value = axis_ptr[0]; + return kTfLiteOk; + } else { + TF_LITE_KERNEL_LOG(context, + "Axis type %s (%d) not supported by Expand_Dims.", + TfLiteTypeGetName(axis->type), axis->type); + return kTfLiteError; + } +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + const TfLiteTensor* axis; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxisTensor, &axis)); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + output->type = input->type; + if (IsDynamicTensor(axis)) { + TF_LITE_KERNEL_LOG(context, + "DynamicTensor is not yet supported by Expand_Dims."); + return kTfLiteError; + } + return kTfLiteOk; +} + +template +void memCopyN(T* out, const T* in, const int num_elements) { + for (int i = 0; i < num_elements; ++i) { + out[i] = in[i]; + } +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* axis = + tflite::micro::GetEvalInput(context, node, kAxisTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const int flat_size = ElementCount(*input->dims); + const int input_dims = input->dims->size; + + int32_t axis_value; + TF_LITE_ENSURE_OK(context, + GetAxisValueFromTensor(context, axis, &axis_value)); + if ((axis_value > static_cast(input_dims)) || + (axis_value < static_cast(-(input_dims + 1)))) { + TF_LITE_KERNEL_LOG(context, "Invalid Expand_Dims axis value (%d).", + axis_value); + return kTfLiteError; + } + ExpandTensorDim(context, input, axis_value, output); + + switch (input->type) { + case kTfLiteFloat32: { + memCopyN(tflite::micro::GetTensorData(output), + 
tflite::micro::GetTensorData(input), flat_size); + } break; + case kTfLiteInt8: { + memCopyN(tflite::micro::GetTensorData(output), + tflite::micro::GetTensorData(input), flat_size); + } break; + default: + TF_LITE_KERNEL_LOG( + context, + "Expand_Dims only currently supports int8 and float32, got %d.", + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_EXPAND_DIMS() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/fill.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/fill.cc new file mode 100644 index 00000000..ca3d15e1 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/fill.cc @@ -0,0 +1,131 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/fill.h" + +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +namespace { + +template +TfLiteStatus EnsureEqImpl(TfLiteContext* context, const TfLiteIntArray* array, + const TfLiteTensor* tensor) { + for (int i = 0; i < array->size; ++i) { + TF_LITE_ENSURE_EQ(context, array->data[i], GetTensorData(tensor)[i]); + } + return kTfLiteOk; +} + +// Ensure the equality of an int array and a tensor, which must be +// one-dimensional and of an integer type. +TfLiteStatus EnsureEq(TfLiteContext* context, const TfLiteIntArray* array, + const TfLiteTensor* tensor) { + TF_LITE_ENSURE_EQ(context, NumDimensions(tensor), 1); + const auto tensor_len = tensor->dims->data[0]; + TF_LITE_ENSURE_EQ(context, array->size, tensor_len); + + switch (tensor->type) { + case kTfLiteInt8: + return EnsureEqImpl(context, array, tensor); + case kTfLiteUInt8: + return EnsureEqImpl(context, array, tensor); + case kTfLiteInt16: + return EnsureEqImpl(context, array, tensor); + case kTfLiteInt32: + return EnsureEqImpl(context, array, tensor); + case kTfLiteInt64: + return EnsureEqImpl(context, array, tensor); + default: + TF_LITE_KERNEL_LOG(context, + "cannot compare int array to tensor of type %d.", + tensor->type); + return kTfLiteError; + } +} + +constexpr int kDimsTensor = 0; +constexpr int kValueTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + // Ensure inputs and outputs exist. 
+ const TfLiteTensor* dims; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDimsTensor, &dims)); + const TfLiteTensor* value; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value)); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + + // The value tensor must be a scalar. + TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0); + + // The value type and output type must match. + TF_LITE_ENSURE_EQ(context, value->type, output->type); + + // The dims tensor must match the output tensor shape. As a byproduct, + // ensures the dims tensor is of an integer type. + TF_LITE_ENSURE_OK(context, EnsureEq(context, output->dims, dims)); + + return kTfLiteOk; +} + +template +void FillImpl(const TfLiteEvalTensor* value, TfLiteEvalTensor* output) { + reference_ops::Fill( + micro::GetTensorShape(value), micro::GetTensorData(value), + micro::GetTensorShape(output), micro::GetTensorData(output)); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* value = + micro::GetEvalInput(context, node, kValueTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + switch (value->type) { + case kTfLiteFloat32: + FillImpl(value, output); + break; + default: + TF_LITE_KERNEL_LOG( + context, "Fill only currently supports float32 for input 1, got %d.", + TfLiteTypeGetName(value->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_FILL() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.cc index d3fdeacb..28fbd486 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.cc @@ -28,176 +28,37 @@ limitations under the License. namespace tflite { namespace { -struct OpData { - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - // The index of the temporary tensor where the quantized inputs are cached. - int input_quantized_index; - // Cached zero point values of tensors. 
- int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; -}; - -constexpr int kInputTensor = 0; -constexpr int kWeightsTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -TfLiteStatus CalculateOpData(TfLiteContext* context, - TfLiteFusedActivation activation, - TfLiteType data_type, const TfLiteTensor* input, - const TfLiteTensor* filter, - const TfLiteTensor* bias, TfLiteTensor* output, - OpData* data) { - TfLiteStatus status = kTfLiteOk; - if (data_type != kTfLiteFloat32) { - double real_multiplier = 0.0; - TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( - context, input, filter, bias, output, &real_multiplier)); - int exponent; - QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); - data->output_shift = -exponent; - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, activation, output, &data->output_activation_min, - &data->output_activation_max)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - } - return status; -} - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); + return context->AllocatePersistentBuffer(context, + sizeof(OpDataFullyConnected)); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); TFLITE_DCHECK(node->builtin_data != nullptr); - OpData* data = static_cast(node->user_data); + auto* data = static_cast(node->user_data); const auto params = static_cast(node->builtin_data); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + const TfLiteTensor* input = + GetInput(context, node, kFullyConnectedInputTensor); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor); + const TfLiteTensor* filter = + GetInput(context, node, kFullyConnectedWeightsTensor); TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + const TfLiteTensor* bias = + GetOptionalInputTensor(context, node, kFullyConnectedBiasTensor); + TfLiteTensor* output = GetOutput(context, node, kFullyConnectedOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE_MSG(context, input->type == filter->type, "Hybrid models are not supported on TFLite Micro."); - return CalculateOpData(context, params->activation, input->type, input, - filter, bias, output, data); -} - -TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - tflite::FullyConnectedParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = -data.filter_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.output_multiplier = data.output_multiplier; - // TODO(b/138810107): Figure out whether output shift should be inverted - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - 
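// ---------------------------------------------------------------------------
// Annotation added for clarity (not part of the upstream file): the parameters
// assembled above feed the usual per-output-cell integer affine math,
//   acc = sum_i (x_i + input_offset) * (w_i + weights_offset) + bias
//   out = clamp(round(acc * real_multiplier) + output_offset,
//               quantized_activation_min, quantized_activation_max)
// with real_multiplier = input_scale * filter_scale / output_scale, carried as
// the fixed-point pair output_multiplier / output_shift.
// ---------------------------------------------------------------------------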
reference_integer_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; -} - -TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::FullyConnectedParams op_params; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - -#define TF_LITE_FULLY_CONNECTED(output_data_type) \ - reference_ops::FullyConnected( \ - op_params, tflite::micro::GetTensorShape(input), \ - tflite::micro::GetTensorData(input), \ - tflite::micro::GetTensorShape(filter), \ - tflite::micro::GetTensorData(filter), \ - tflite::micro::GetTensorShape(bias), \ - tflite::micro::GetTensorData(bias), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - switch (output->type) { - case kTfLiteUInt8: - TF_LITE_FULLY_CONNECTED(uint8_t); - break; - case kTfLiteInt16: - TF_LITE_FULLY_CONNECTED(int16_t); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteFusedActivation activation, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(activation, &output_activation_min, - &output_activation_max); - tflite::FullyConnectedParams op_params; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - tflite::reference_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; + return CalculateOpDataFullyConnected(context, params->activation, input->type, + input, filter, bias, output, data); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { @@ -206,33 +67,66 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { static_cast(node->builtin_data); const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kWeightsTensor); + 
tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kBiasTensor); + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); + const auto& data = + *(static_cast(node->user_data)); // Checks in Prepare ensure input, output and filter types are all the same. switch (input->type) { - case kTfLiteFloat32: - return EvalFloat(context, node, params->activation, input, filter, bias, - output); - case kTfLiteInt8: - return EvalQuantizedInt8(context, node, data, input, filter, bias, - output); + case kTfLiteFloat32: { + tflite::reference_ops::FullyConnected( + FullyConnectedParamsFloat(params->activation), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } - case kTfLiteUInt8: - return EvalQuantized(context, node, data, input, filter, bias, output); + case kTfLiteInt8: { + tflite::reference_integer_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } - default: + case kTfLiteUInt8: { + tflite::reference_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + default: { TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", TfLiteTypeGetName(input->type), input->type); return kTfLiteError; + } } return kTfLiteOk; } diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.h index 3e646718..46053220 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.h +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected.h @@ -15,10 +15,51 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ +#include + +#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" namespace tflite { +struct OpDataFullyConnected { + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. 
+ int32_t output_activation_min; + int32_t output_activation_max; + // The index of the temporary tensor where the quantized inputs are cached. + int input_quantized_index; + // Cached zero point values of tensors. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; +}; + +extern const int kFullyConnectedInputTensor; +extern const int kFullyConnectedWeightsTensor; +extern const int kFullyConnectedBiasTensor; +extern const int kFullyConnectedOutputTensor; + +// Returns a FullyConnectedParams struct with all the parameters needed for a +// float computation. +FullyConnectedParams FullyConnectedParamsFloat( + TfLiteFusedActivation activation); + +// Returns a FullyConnectedParams struct with all the parameters needed for a +// quantized computation. +FullyConnectedParams FullyConnectedParamsQuantized( + const OpDataFullyConnected& op_data); + +TfLiteStatus CalculateOpDataFullyConnected( + TfLiteContext* context, TfLiteFusedActivation activation, + TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter, + const TfLiteTensor* bias, TfLiteTensor* output, OpDataFullyConnected* data); + // This is the most generic TfLiteRegistration. The actual supported types may // still be target dependent. The only requirement is that every implementation // (reference or optimized) must define this function. @@ -30,7 +71,7 @@ TfLiteRegistration Register_FULLY_CONNECTED(); // part of the build. As a result, we use defined(ARDUINO) as proxy for the // CMSIS kernels for this one special case. -// Returns a TfLiteRegistration struct for cmsis-nn kernel variant that only +// Returns a TfLiteRegistration struct for cmsis_nn kernel variant that only // supports int8. TfLiteRegistration Register_FULLY_CONNECTED_INT8(); diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected_common.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected_common.cc new file mode 100644 index 00000000..64046a9c --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/fully_connected_common.cc @@ -0,0 +1,78 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/fully_connected.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +const int kFullyConnectedInputTensor = 0; +const int kFullyConnectedWeightsTensor = 1; +const int kFullyConnectedBiasTensor = 2; +const int kFullyConnectedOutputTensor = 0; + +FullyConnectedParams FullyConnectedParamsQuantized( + const OpDataFullyConnected& op_data) { + FullyConnectedParams op_params; + op_params.input_offset = -op_data.input_zero_point; + op_params.weights_offset = -op_data.filter_zero_point; + op_params.output_offset = op_data.output_zero_point; + op_params.output_multiplier = op_data.output_multiplier; + op_params.output_shift = op_data.output_shift; + op_params.quantized_activation_min = op_data.output_activation_min; + op_params.quantized_activation_max = op_data.output_activation_max; + return op_params; +} + +FullyConnectedParams FullyConnectedParamsFloat( + TfLiteFusedActivation activation) { + FullyConnectedParams op_params; + CalculateActivationRange(activation, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +TfLiteStatus CalculateOpDataFullyConnected( + TfLiteContext* context, TfLiteFusedActivation activation, + TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter, + const TfLiteTensor* bias, TfLiteTensor* output, + OpDataFullyConnected* data) { + if (data_type != kTfLiteFloat32) { + double real_multiplier = 0.0; + TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( + context, input, filter, bias, output, &real_multiplier)); + QuantizeMultiplier(real_multiplier, &data->output_multiplier, + &data->output_shift); + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + return CalculateActivationRangeQuantized(context, activation, output, + &data->output_activation_min, + &data->output_activation_max); + } + return kTfLiteOk; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.cc index cef6c01c..dd0ba8ba 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.cc @@ -15,6 +15,8 @@ limitations under the License. 
#include "tensorflow/lite/micro/kernels/kernel_runner.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" + namespace tflite { namespace micro { @@ -30,12 +32,12 @@ uint8_t KernelRunner::kKernelRunnerBuffer_[]; KernelRunner::KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, TfLiteIntArray* outputs, - void* builtin_data, ErrorReporter* error_reporter) - : allocator_(SimpleMemoryAllocator::Create( - error_reporter, kKernelRunnerBuffer_, kKernelRunnerBufferSize_)), + void* builtin_data) + : allocator_(SimpleMemoryAllocator::Create(GetMicroErrorReporter(), + kKernelRunnerBuffer_, + kKernelRunnerBufferSize_)), registration_(registration), - tensors_(tensors), - error_reporter_(error_reporter) { + tensors_(tensors) { // Prepare TfLiteContext: context_.impl_ = static_cast(this); context_.ReportError = ReportOpError; @@ -52,9 +54,10 @@ KernelRunner::KernelRunner(const TfLiteRegistration& registration, node_.builtin_data = builtin_data; } -TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data) { +TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, + size_t length) { if (registration_.init) { - node_.user_data = registration_.init(&context_, init_data, /*length=*/0); + node_.user_data = registration_.init(&context_, init_data, length); } if (registration_.prepare) { TF_LITE_ENSURE_STATUS(registration_.prepare(&context_, &node_)); @@ -64,8 +67,7 @@ TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data) { TfLiteStatus KernelRunner::Invoke() { if (registration_.invoke == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "TfLiteRegistration missing invoke function pointer!"); + MicroPrintf("TfLiteRegistration missing invoke function pointer!"); return kTfLiteError; } return registration_.invoke(&context_, &node_); @@ -118,10 +120,8 @@ TfLiteStatus KernelRunner::RequestScratchBufferInArena(TfLiteContext* context, TFLITE_DCHECK(runner != nullptr); if (runner->scratch_buffer_count_ == kNumScratchBuffers_) { - TF_LITE_REPORT_ERROR( - runner->error_reporter_, - "Exceeded the maximum number of scratch tensors allowed (%d).", - kNumScratchBuffers_); + MicroPrintf("Exceeded the maximum number of scratch tensors allowed (%d).", + kNumScratchBuffers_); return kTfLiteError; } @@ -151,13 +151,9 @@ void* KernelRunner::GetScratchBuffer(TfLiteContext* context, int buffer_index) { void KernelRunner::ReportOpError(struct TfLiteContext* context, const char* format, ...) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - va_list args; va_start(args, format); - TF_LITE_REPORT_ERROR(runner->error_reporter_, format, args); + GetMicroErrorReporter()->Report(format, args); va_end(args); } diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.h index 45d107e7..b145097d 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.h +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_runner.h @@ -23,23 +23,22 @@ limitations under the License. namespace tflite { namespace micro { -// Helper class to perform a simulated kernel (i.e. TfLiteRegistration) lifecyle -// (init, prepare, invoke). All internal allocations are handled by this class. -// Simply pass in the registration, list of required tensors, inputs array, -// outputs array, and any pre-builtin data. 
Calling Invoke() will automatically -// walk the kernl and outputs will be ready on the the TfLiteTensor output -// provided during construction. +// Helper class to perform a simulated kernel (i.e. TfLiteRegistration) +// lifecycle (init, prepare, invoke). All internal allocations are handled by +// this class. Simply pass in the registration, list of required tensors, inputs +// array, outputs array, and any pre-builtin data. Calling Invoke() will +// automatically walk the kernel and outputs will be ready on the TfLiteTensor +// output provided during construction. class KernelRunner { public: KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, - TfLiteIntArray* outputs, void* builtin_data, - ErrorReporter* error_reporter); + TfLiteIntArray* outputs, void* builtin_data); // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any - // exceptions will be reported through the error_reporter and returned as a - // status code here. - TfLiteStatus InitAndPrepare(const char* init_data = nullptr); + // exceptions will be DebugLog'd and returned as a status code. + TfLiteStatus InitAndPrepare(const char* init_data = nullptr, + size_t length = 0); // Calls init, prepare, and invoke on a given TfLiteRegistration pointer. // After successful invoke, results will be available in the output tensor as @@ -60,7 +59,7 @@ class KernelRunner { ...); private: - static constexpr int kNumScratchBuffers_ = 5; + static constexpr int kNumScratchBuffers_ = 12; static constexpr int kKernelRunnerBufferSize_ = 10000; static uint8_t kKernelRunnerBuffer_[kKernelRunnerBufferSize_]; @@ -68,7 +67,6 @@ class KernelRunner { SimpleMemoryAllocator* allocator_ = nullptr; const TfLiteRegistration& registration_; TfLiteTensor* tensors_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; TfLiteContext context_ = {}; TfLiteNode node_ = {}; diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.cc index deca92b6..d769f9e5 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.cc @@ -37,5 +37,17 @@ const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { return RuntimeShape(dims_size, dims_data); } +PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + } // namespace micro } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.h index 79cd58ec..043fb021 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.h +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/kernel_util.h @@ -18,6 +18,7 @@ limitations under the License. 
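The KernelRunner comments above describe a simulated kernel lifecycle: optional init with the flatbuffer init data and its length, then prepare, then invoke, with an error logged when the invoke pointer is missing. A toy stand-in that only mirrors that call order — the types below are hypothetical, not the real TfLiteRegistration or TFLM API:

```cpp
// Toy model of the init/prepare/invoke lifecycle KernelRunner simulates.
#include <cstddef>
#include <cstdio>

struct ToyContext { /* would hold tensors, allocators, error reporting */ };
struct ToyNode { void* user_data = nullptr; };

struct ToyRegistration {
  void* (*init)(ToyContext*, const char* buffer, std::size_t length);
  bool (*prepare)(ToyContext*, ToyNode*);
  bool (*invoke)(ToyContext*, ToyNode*);
};

bool RunToyKernel(const ToyRegistration& reg, ToyContext* ctx, ToyNode* node,
                  const char* init_data, std::size_t length) {
  if (reg.init) node->user_data = reg.init(ctx, init_data, length);  // 1. init
  if (reg.prepare && !reg.prepare(ctx, node)) return false;          // 2. prepare
  if (!reg.invoke) {                                                 // 3. invoke
    std::printf("missing invoke function pointer\n");
    return false;
  }
  return reg.invoke(ctx, node);
}
```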
#include +#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/types.h" @@ -69,6 +70,8 @@ const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor); bool HaveSameShapes(const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2); +PaddingType RuntimePaddingType(TfLitePadding padding); + } // namespace micro } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/l2_pool_2d.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/l2_pool_2d.cc new file mode 100644 index 00000000..00b2b570 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/l2_pool_2d.cc @@ -0,0 +1,137 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include +#include + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/reference/pooling.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// required rank for input/output tensor shape +constexpr int kTensorShapeRank = 4; + +// input/output tensor shape rank associations +enum { kBatchRank = 0, kHeightRank, kWidthRank, kChannelRank }; + +TfLiteStatus L2Prepare(TfLiteContext* context, TfLiteNode* node) { + auto* params = static_cast(node->builtin_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), kTensorShapeRank); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), kTensorShapeRank); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + int batches = SizeOfDimension(input, kBatchRank); + int height = SizeOfDimension(input, kHeightRank); + int width = SizeOfDimension(input, kWidthRank); + int channels_out = SizeOfDimension(input, kChannelRank); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params->padding; + int out_width, out_height; + + params->computed.padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, 1, 1, height, width, + params->filter_height, params->filter_width, padding, &out_height, + &out_width); + + // We currently don't have a quantized implementation of L2Pool + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); + + // We must update the output tensor dimensions. 
+ // The dims storage is expected to be the same area in memory + // for both TfLiteTensor and TfLiteEvalTensor. This is important + // because TfLiteTensor in the MicroInterpreter is a temporary + // allocation. + output->dims->data[kBatchRank] = batches; + output->dims->data[kHeightRank] = out_height; + output->dims->data[kWidthRank] = out_width; + output->dims->data[kChannelRank] = channels_out; + + return kTfLiteOk; +} + +void L2EvalFloat(const TfLitePoolParams& params, const TfLiteEvalTensor& input, + tflite::PoolParams* op_params, TfLiteEvalTensor* output) { + float activation_min, activation_max; + CalculateActivationRange(params.activation, &activation_min, &activation_max); + + op_params->float_activation_min = activation_min; + op_params->float_activation_max = activation_max; + reference_ops::L2Pool(*op_params, tflite::micro::GetTensorShape(&input), + tflite::micro::GetTensorData(&input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = static_cast(node->builtin_data); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = params->computed.padding.height; + op_params.padding_values.width = params->computed.padding.width; + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: + L2EvalFloat(*params, *input, &op_params, output); + break; + default: + TF_LITE_KERNEL_LOG(context, + "L2_POOL_2D only supports float32 currently, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_L2_POOL_2D() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/L2Prepare, + /*invoke=*/L2Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/leaky_relu.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/leaky_relu.cc new file mode 100644 index 00000000..0a7521fb --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/leaky_relu.cc @@ -0,0 +1,153 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
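The L2_POOL_2D kernel above delegates the arithmetic to reference_ops::L2Pool; for one output cell that amounts to the L2 "average" of the values covered by the filter window, i.e. sqrt(mean(x^2)), clamped to the fused-activation range. A standalone sketch of just that per-window math (padding, strides and the NHWC bookkeeping handled by L2Prepare/L2Eval are omitted, and the function name is illustrative):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// L2 pooling over one window: sqrt of the mean of squares, then clamp.
float L2PoolWindow(const std::vector<float>& window,
                   float activation_min, float activation_max) {
  float sum_sq = 0.0f;
  for (float v : window) sum_sq += v * v;
  float result = std::sqrt(sum_sq / static_cast<float>(window.size()));
  return std::min(std::max(result, activation_min), activation_max);
}
```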
+==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/leaky_relu.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +struct LeakyReluOpData { + // quantization parameters + int32_t output_multiplier_alpha; + int32_t output_shift_alpha; + int32_t output_multiplier_identity; + int32_t output_shift_identity; + int32_t input_zero_point; + int32_t output_zero_point; +}; + +template +void QuantizeLeakyRelu(const LeakyReluOpData& data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + LeakyReluParams op_params = {}; + + op_params.input_offset = data.input_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier_alpha = data.output_multiplier_alpha; + op_params.output_shift_alpha = data.output_shift_alpha; + op_params.output_multiplier_identity = data.output_multiplier_identity; + op_params.output_shift_identity = data.output_shift_identity; + reference_ops::QuantizeLeakyRelu(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + if (output->type == kTfLiteInt8) { + LeakyReluOpData* data = static_cast(node->user_data); + const auto* params = + static_cast(node->builtin_data); + + data->input_zero_point = input->params.zero_point; + data->output_zero_point = output->params.zero_point; + + int output_shift_alpha; + double alpha_multiplier = static_cast( + input->params.scale * params->alpha / output->params.scale); + QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, + &output_shift_alpha); + data->output_shift_alpha = static_cast(output_shift_alpha); + + int output_shift_identity; + double identity_multiplier = + static_cast(input->params.scale / output->params.scale); + QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, + &output_shift_identity); + data->output_shift_identity = static_cast(output_shift_identity); + } + + return kTfLiteOk; +} + +void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(LeakyReluOpData)); +} + +TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, 
kOutputTensor); + const LeakyReluOpData& data = *static_cast(node->user_data); + + switch (input->type) { + case kTfLiteFloat32: { + LeakyReluParams op_params = {}; + const auto* params = + static_cast(node->builtin_data); + + op_params.alpha = params->alpha; + reference_ops::LeakyRelu(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } break; + case kTfLiteInt8: { + QuantizeLeakyRelu(data, input, output); + return kTfLiteOk; + } break; + default: + TF_LITE_KERNEL_LOG( + context, "Only float32, int8 are supported by LEAKY_RELU, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteError; +} + +} // namespace + +TfLiteRegistration Register_LEAKY_RELU() { + return {/*init=*/LeakyReluInit, + /*free=*/nullptr, + /*prepare=*/LeakyReluPrepare, + /*invoke=*/LeakyReluEval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_ops.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_ops.h index a65fc4f6..bbb166b8 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_ops.h +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_ops.h @@ -31,12 +31,26 @@ namespace tflite { // (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops should // have their Register function declarations in the tflite namespace. +TfLiteRegistration Register_ADD_N(); +TfLiteRegistration Register_BATCH_TO_SPACE_ND(); +TfLiteRegistration Register_CAST(); TfLiteRegistration Register_CONV_2D(); TfLiteRegistration Register_DEPTHWISE_CONV_2D(); +TfLiteRegistration Register_DIV(); +TfLiteRegistration Register_ELU(); +TfLiteRegistration Register_EXP(); +TfLiteRegistration Register_EXPAND_DIMS(); +TfLiteRegistration Register_FILL(); +TfLiteRegistration Register_L2_POOL_2D(); +TfLiteRegistration Register_LEAKY_RELU(); TfLiteRegistration Register_QUANTIZE(); TfLiteRegistration Register_SHAPE(); TfLiteRegistration Register_SOFTMAX(); +TfLiteRegistration Register_SPACE_TO_BATCH_ND(); +TfLiteRegistration Register_SQUEEZE(); TfLiteRegistration Register_SVDF(); +TfLiteRegistration Register_TRANSPOSE_CONV(); +TfLiteRegistration Register_ZEROS_LIKE(); namespace ops { namespace micro { diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h index 85db263e..e406ac12 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/micro_utils.h @@ -1,8 +1,11 @@ /* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
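The int8 LEAKY_RELU path above derives two multipliers in CalculateOpData: alpha_multiplier = input_scale * alpha / output_scale for negative inputs and identity_multiplier = input_scale / output_scale for non-negative ones. The plain-float sketch below shows where that pair comes from — dequantize, apply leaky-relu, requantize — using direct float math instead of the kernel's fixed-point multiplier/shift pairs; it is an illustration, not the kernel implementation:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t LeakyReluQuantizedSketch(int8_t input, float input_scale,
                                int32_t input_zero_point, float output_scale,
                                int32_t output_zero_point, float alpha) {
  const float x = (input - input_zero_point) * input_scale;  // dequantize
  const float y = x >= 0.0f ? x : alpha * x;                 // leaky relu
  // Requantize: non-negative values are scaled by input_scale/output_scale
  // (the "identity" multiplier), negative values additionally by alpha.
  const int32_t q =
      static_cast<int32_t>(std::lround(y / output_scale)) + output_zero_point;
  return static_cast<int8_t>(std::min<int32_t>(std::max<int32_t>(q, -128), 127));
}
```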
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.cc index f6d8c927..97f5a004 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/quantize.h" + +#include "tensorflow/lite/micro/kernels/quantize.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/requantize.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/kernel_util.h" @@ -25,160 +25,10 @@ limitations under the License. namespace tflite { namespace { -struct OpData { - tflite::QuantizationParams quantization_params; - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - int32_t input_zero_point; -}; - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - // TODO(b/128934713): Add support for fixed-point per-channel quantization. - // Currently this only support affine per-layer quantization. 
- TF_LITE_ENSURE_EQ(context, output->quantization.type, - kTfLiteAffineQuantization); - const auto* affine_quantization = - reinterpret_cast(output->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); - - TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 || - input->type == kTfLiteInt16 || - input->type == kTfLiteInt8); - TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || - output->type == kTfLiteInt8 || - output->type == kTfLiteInt16 || - output->type == kTfLiteInt32); - - if (((input->type == kTfLiteInt16 || input->type == kTfLiteInt8) && - output->type == kTfLiteInt8) || - (input->type == kTfLiteInt16 && output->type == kTfLiteInt16)) { - double effective_scale = static_cast(input->params.scale) / - static_cast(output->params.scale); - - QuantizeMultiplier(effective_scale, &data->output_multiplier, - &data->output_shift); - } - - data->quantization_params.zero_point = output->params.zero_point; - data->quantization_params.scale = static_cast(output->params.scale); - - data->input_zero_point = input->params.zero_point; - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - if (input->type == kTfLiteFloat32) { - switch (output->type) { - case kTfLiteInt8: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteUInt8: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt16) { - size_t size = ElementCount(*input->dims); - switch (output->type) { - case kTfLiteInt8: - reference_ops::Requantize(tflite::micro::GetTensorData(input), - size, data->output_multiplier, - data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->output_multiplier, data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - case kTfLiteInt32: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->output_multiplier, data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - 
TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt8) { - // Int8 to Int8 requantization, required if the input and output tensors - // have different scales and/or zero points. - size_t size = ElementCount(*input->dims); - switch (output->type) { - case kTfLiteInt8: - reference_ops::Requantize(tflite::micro::GetTensorData(input), - size, data->output_multiplier, - data->output_shift, data->input_zero_point, - data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; + return context->AllocatePersistentBuffer(context, + sizeof(OpDataQuantizeReference)); } } // namespace @@ -186,8 +36,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteRegistration Register_QUANTIZE() { return {/*init=*/Init, /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, + /*prepare=*/PrepareQuantizeReference, + /*invoke=*/EvalQuantizeReference, /*profiling_string=*/nullptr, /*builtin_code=*/0, /*custom_name=*/nullptr, diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.h new file mode 100644 index 00000000..ba93809a --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize.h @@ -0,0 +1,37 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +struct OpDataQuantizeReference { + tflite::QuantizationParams quantization_params; + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t requantize_output_multiplier; + int requantize_output_shift; + + int32_t input_zero_point; +}; + +TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node); +TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, TfLiteNode* node); +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize_common.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize_common.cc new file mode 100644 index 00000000..098854cd --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/quantize_common.cc @@ -0,0 +1,171 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/quantize.h" +#include "tensorflow/lite/kernels/internal/reference/requantize.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/quantize.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + auto* data = static_cast(node->user_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const TfLiteTensor* input = GetInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = GetOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + // TODO(b/128934713): Add support for fixed-point per-channel quantization. + // Currently this only support affine per-layer quantization. + TF_LITE_ENSURE_EQ(context, output->quantization.type, + kTfLiteAffineQuantization); + const auto* affine_quantization = + reinterpret_cast(output->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); + + TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 || + input->type == kTfLiteInt16 || + input->type == kTfLiteInt8); + TF_LITE_ENSURE(context, output->type == kTfLiteInt8 || + output->type == kTfLiteInt16 || + output->type == kTfLiteInt32); + + if ((input->type == kTfLiteInt16 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt32) || + (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) || + (input->type == kTfLiteInt16 && output->type == kTfLiteInt32)) { + double effective_scale = static_cast(input->params.scale) / + static_cast(output->params.scale); + + QuantizeMultiplier(effective_scale, &data->requantize_output_multiplier, + &data->requantize_output_shift); + } + + data->quantization_params.zero_point = output->params.zero_point; + data->quantization_params.scale = static_cast(output->params.scale); + + data->input_zero_point = input->params.zero_point; + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + auto* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + if (input->type == kTfLiteFloat32) { + switch (output->type) { + case kTfLiteInt8: 
+ reference_ops::AffineQuantize( + data->quantization_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::AffineQuantize( + data->quantization_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + default: + TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt16) { + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + case kTfLiteInt32: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + default: + TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt8) { + // Int8 to Int8 requantization, required if the input and output tensors + // have different scales and/or zero points. + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt32: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + default: + TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else { + TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.cc index c96fa561..f6a30010 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
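EvalQuantizeReference above dispatches to two kinds of arithmetic: AffineQuantize for float inputs and Requantize when input and output are both integer types with different scales or zero points. The sketches below show both paths in plain float/double form; the real kernels use the requantize_output_multiplier/shift pair produced by QuantizeMultiplier() in PrepareQuantizeReference, and the function names here are illustrative:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// float -> int8 (AffineQuantize): q = round(x / scale) + zero_point, clamped.
int8_t AffineQuantizeSketch(float x, float output_scale,
                            int32_t output_zero_point) {
  int32_t q =
      static_cast<int32_t>(std::lround(x / output_scale)) + output_zero_point;
  return static_cast<int8_t>(std::min<int32_t>(std::max<int32_t>(q, -128), 127));
}

// intN -> intM (Requantize): rescale by input_scale/output_scale and move
// between the two zero points.
int32_t RequantizeSketch(int32_t input, int32_t input_zero_point,
                         double effective_scale,  // input_scale / output_scale
                         int32_t output_zero_point) {
  return static_cast<int32_t>(
             std::lround((input - input_zero_point) * effective_scale)) +
         output_zero_point;
}
```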
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,12 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/softmax.h" +#include "tensorflow/lite/micro/kernels/softmax.h" #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/op_macros.h" @@ -27,86 +28,9 @@ limitations under the License. namespace tflite { namespace { -// Softmax parameter data that persists in user_data -static constexpr int kInt16LUTArraySize = 513; - -TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, - const TfLiteTensor* input, - TfLiteTensor* output, - const TfLiteSoftmaxParams* params, - SoftmaxParams* op_data) { - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 || - input->type == kTfLiteInt16) { - if (input->type == kTfLiteUInt8) { - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt8); - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - } else if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768, - (0.001f * 1.f / 32768)); - } else { // input->type == kTfLiteInt8 - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768); - TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 65536, - (0.001f * 1.f / 65536)); - } else { // output->type == kTfLiteint8 - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); - TF_LITE_ENSURE(context, output->params.scale == 1.f / 256); - } - } - - static const int kScaledDiffIntegerBits = 5; - - // Calculate input_multiplier and input_left_shift - if (input->type == kTfLiteInt16) { - int input_left_shift; - double input_scale_beta_rescale = - static_cast(input->params.scale) * - static_cast(params->beta) / - (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] - // correspond to [-10.0, 0.0] - QuantizeMultiplier(input_scale_beta_rescale, &op_data->input_multiplier, - &input_left_shift); - op_data->input_left_shift = input_left_shift; - } else { - int input_left_shift; - tflite::PreprocessSoftmaxScaling( - static_cast(params->beta), - static_cast(input->params.scale), kScaledDiffIntegerBits, - &op_data->input_multiplier, &input_left_shift); - op_data->input_left_shift = input_left_shift; - op_data->diff_min = - -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, - op_data->input_left_shift); - } - } else { - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - op_data->beta = static_cast(params->beta); - } - return kTfLiteOk; -} - -// Takes a tensor and performs softmax along the last dimension. 
-void SoftmaxFloat(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - tflite::reference_ops::Softmax(op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, const SoftmaxParams& op_data) { - if (input->type == kTfLiteUInt8) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (input->type == kTfLiteInt8) { + if (input->type == kTfLiteInt8) { if (output->type == kTfLiteInt16) { tflite::reference_ops::Softmax( op_data, tflite::micro::GetTensorShape(input), @@ -129,60 +53,6 @@ void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, } } -void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(SoftmaxParams)); -} - -TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, NumDimensions(input) >= 1); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, node->user_data != nullptr); - SoftmaxParams* op_data = static_cast(node->user_data); - // Only allocate LUTs for KTfLiteInt16 data type - if (input->type == kTfLiteInt16) { - void* raw_exp_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, raw_exp_lut != nullptr); - op_data->exp_lut = reinterpret_cast(raw_exp_lut); - void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); - op_data->one_over_one_plus_x_lut = - reinterpret_cast(one_over_one_plus_x_lut); - } - - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || - input->type == kTfLiteUInt8 || - input->type == kTfLiteInt16); - } else { - TF_LITE_ENSURE_EQ(context, input->type, output->type); - } - - // Populate LUT if required - if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - // exp LUT only used on negative values - // we consider exp(-10.0) is insignificant to accumulation - gen_lut([](float value) { return std::exp(value); }, -10.0f, 0.0f, - op_data->exp_lut, kInt16LUTArraySize); - gen_lut([](float value) { return 1.0f / (1.0f + value); }, 0.0f, 1.0f, - op_data->one_over_one_plus_x_lut, kInt16LUTArraySize); - op_data->zero_point = output->params.zero_point; - op_data->scale = output->params.scale; - } - - auto* params = static_cast(node->builtin_data); - return CalculateSoftmaxParams(context, input, output, params, op_data); -} - TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); @@ -192,11 +62,14 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { switch 
(input->type) { case kTfLiteFloat32: { - SoftmaxFloat(input, output, op_data); + tflite::reference_ops::Softmax( + op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); return kTfLiteOk; } case kTfLiteInt8: - case kTfLiteUInt8: case kTfLiteInt16: { SoftmaxQuantized(input, output, op_data); return kTfLiteOk; diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.h new file mode 100644 index 00000000..3c9d0cda --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax.h @@ -0,0 +1,30 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length); + +TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax_common.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax_common.cc new file mode 100644 index 00000000..153f9469 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/softmax_common.cc @@ -0,0 +1,140 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/softmax.h" + +namespace tflite { + +namespace { +// Softmax parameter data that persists in user_data +const int kInt16LUTArraySize = 513; + +TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteTensor* output, + const TfLiteSoftmaxParams* params, + SoftmaxParams* op_data) { + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768, + (0.001f * 1.f / 32768)); + } else { // input->type == kTfLiteInt8 + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); + if (output->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768); + TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 65536, + (0.001f * 1.f / 65536)); + } else { // output->type == kTfLiteint8 + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); + TF_LITE_ENSURE(context, output->params.scale == 1.f / 256); + } + } + + static const int kScaledDiffIntegerBits = 5; + + // Calculate input_multiplier and input_left_shift + if (input->type == kTfLiteInt16) { + int input_left_shift; + double input_scale_beta_rescale = + static_cast(input->params.scale) * + static_cast(params->beta) / + (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] + // correspond to [-10.0, 0.0] + QuantizeMultiplier(input_scale_beta_rescale, &op_data->input_multiplier, + &input_left_shift); + op_data->input_left_shift = input_left_shift; + } else { + int input_left_shift; + tflite::PreprocessSoftmaxScaling( + static_cast(params->beta), + static_cast(input->params.scale), kScaledDiffIntegerBits, + &op_data->input_multiplier, &input_left_shift); + op_data->input_left_shift = input_left_shift; + op_data->diff_min = + -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, + op_data->input_left_shift); + } + } else { + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); + op_data->beta = static_cast(params->beta); + } + return kTfLiteOk; +} + +} // namespace + +void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(SoftmaxParams)); +} + +TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input = GetInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + TfLiteTensor* output = GetOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, node->user_data != nullptr); + SoftmaxParams* op_data = static_cast(node->user_data); + // Only allocate LUTs for KTfLiteInt16 data type + if (input->type == kTfLiteInt16) { + void* raw_exp_lut = 
context->AllocatePersistentBuffer( + context, sizeof(int16_t) * kInt16LUTArraySize); + TF_LITE_ENSURE(context, raw_exp_lut != nullptr); + op_data->exp_lut = reinterpret_cast(raw_exp_lut); + void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( + context, sizeof(int16_t) * kInt16LUTArraySize); + TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); + op_data->one_over_one_plus_x_lut = + reinterpret_cast(one_over_one_plus_x_lut); + } + + if (output->type == kTfLiteInt16) { + TF_LITE_ENSURE(context, + input->type == kTfLiteInt8 || input->type == kTfLiteInt16); + } else { + TF_LITE_ENSURE_EQ(context, input->type, output->type); + } + + // Populate LUT if required + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + // exp LUT only used on negative values + // we consider exp(-10.0) is insignificant to accumulation + gen_lut([](float value) { return std::exp(value); }, -10.0f, 0.0f, + op_data->exp_lut, kInt16LUTArraySize); + gen_lut([](float value) { return 1.0f / (1.0f + value); }, 0.0f, 1.0f, + op_data->one_over_one_plus_x_lut, kInt16LUTArraySize); + op_data->zero_point = output->params.zero_point; + op_data->scale = output->params.scale; + } + + auto* params = static_cast(node->builtin_data); + return CalculateSoftmaxParams(context, input, output, params, op_data); +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/space_to_batch_nd.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/space_to_batch_nd.cc new file mode 100644 index 00000000..fdfb81bc --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/space_to_batch_nd.cc @@ -0,0 +1,121 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h" + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/internal/types.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kBlockShapeTensor = 1; +constexpr int kCropsTensor = 2; +constexpr int kOutputTensor = 0; + +// Currently, only 3D NHC and 4D NHWC input/output op_context are supported. +// In case of 3D input, it will be extended to 3D NHWC by adding W=1. +// The 4D array need to have exactly 2 spatial dimensions. +// TODO(b/149952582): Support arbitrary dimension in SpaceToBatchND. 
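For int16 inputs, SoftmaxPrepare above allocates two 513-entry lookup tables and fills them via gen_lut: exp(x) sampled over [-10, 0] (values below -10 are treated as negligible) and 1/(1+x) over [0, 1]. A float-only sketch of that sampling pattern — the real gen_lut stores fixed-point int16 entries suitable for interpolation, which this illustration does not attempt:

```cpp
#include <cmath>
#include <vector>

// Evenly sample fn over [min_x, max_x] into a table of `size` entries.
std::vector<float> SampleLut(float (*fn)(float), float min_x, float max_x,
                             int size) {
  std::vector<float> lut(size);
  const float step = (max_x - min_x) / static_cast<float>(size - 1);
  for (int i = 0; i < size; ++i) lut[i] = fn(min_x + i * step);
  return lut;
}

// Usage mirroring the two gen_lut() calls (513 entries each):
// auto exp_lut = SampleLut([](float v) { return std::exp(v); }, -10.0f, 0.0f, 513);
// auto one_over_one_plus_x_lut =
//     SampleLut([](float v) { return 1.0f / (1.0f + v); }, 0.0f, 1.0f, 513);
```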
+const int kInputOutputMinDimensionNum = 3; +const int kInputOutputMaxDimensionNum = 4; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(SpaceToBatchParams)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr && output != nullptr); + + TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum); + TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const SpaceToBatchParams& params = + *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* block_shape = + tflite::micro::GetEvalInput(context, node, kBlockShapeTensor); + const TfLiteEvalTensor* crops = + tflite::micro::GetEvalInput(context, node, kCropsTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: + reference_ops::SpaceToBatchND( + params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(block_shape), + tflite::micro::GetTensorData(block_shape), + tflite::micro::GetTensorShape(crops), + tflite::micro::GetTensorData(crops), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::SpaceToBatchND( + params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(block_shape), + tflite::micro::GetTensorData(block_shape), + tflite::micro::GetTensorShape(crops), + tflite::micro::GetTensorData(crops), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace. + +TfLiteRegistration Register_SPACE_TO_BATCH_ND() { + return {/*init=*/Init, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/squeeze.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/squeeze.cc new file mode 100644 index 00000000..522c2d0e --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/squeeze.cc @@ -0,0 +1,111 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/memory_helpers.h" + +namespace tflite { +namespace { + +struct SqueezeContext { + SqueezeContext(TfLiteContext* context, TfLiteNode* node) + : params(reinterpret_cast(node->builtin_data)), + input(GetInput(context, node, 0)), + output(GetOutput(context, node, 0)) {} + TfLiteSqueezeParams* params; + const TfLiteTensor* const input; + TfLiteTensor* output; +}; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + SqueezeContext op_context(context, node); + const int input_num_dims = NumDimensions(op_context.input); + const int num_squeeze_dims = op_context.params->num_squeeze_dims; + + // Determines number of dimensions of output tensor after squeeze. + const TfLiteIntArray* input_dims = op_context.input->dims; + const TfLiteIntArray* output_dims = op_context.output->dims; + const int* squeeze_dims = op_context.params->squeeze_dims; + + constexpr int max_squeeze_dims = 8; + TF_LITE_ENSURE(context, input_num_dims <= max_squeeze_dims); + bool should_squeeze[max_squeeze_dims] = {}; + + if (num_squeeze_dims == 0) { + for (int idx = 0; idx < input_num_dims; ++idx) { + if (input_dims->data[idx] == 1) { + should_squeeze[idx] = true; + } + } + } else { + for (int idx = 0; idx < num_squeeze_dims; ++idx) { + int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + input_num_dims + : squeeze_dims[idx]; + TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims && + input_dims->data[current] == 1); + should_squeeze[current] = true; + } + } + + // Ensure output dimensions are big enough. 
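The SQUEEZE Prepare logic above selects dimensions to drop: with no explicit squeeze_dims every size-1 axis is removed, otherwise only the listed axes (negative indices counting from the end), each of which must have size 1. A standalone sketch of the resulting output shape, with the kernel's error handling simplified to skipping invalid axes:

```cpp
#include <vector>

std::vector<int> SqueezedShape(const std::vector<int>& input_dims,
                               const std::vector<int>& squeeze_dims) {
  const int num_dims = static_cast<int>(input_dims.size());
  std::vector<bool> should_squeeze(num_dims, false);
  if (squeeze_dims.empty()) {
    // No explicit axes: squeeze everything of size 1.
    for (int i = 0; i < num_dims; ++i) should_squeeze[i] = (input_dims[i] == 1);
  } else {
    for (int d : squeeze_dims) {
      const int axis = d < 0 ? d + num_dims : d;  // negative axes wrap around
      if (axis >= 0 && axis < num_dims && input_dims[axis] == 1)
        should_squeeze[axis] = true;
    }
  }
  std::vector<int> output_dims;
  for (int i = 0; i < num_dims; ++i)
    if (!should_squeeze[i]) output_dims.push_back(input_dims[i]);
  return output_dims;
}
```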
+ for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx) { + if (!should_squeeze[in_idx]) { + TFLITE_CHECK_GE(output_dims->data[out_idx++], input_dims->data[in_idx]); + } + } + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + SqueezeContext op_context(context, node); + + if (op_context.input->type == kTfLiteString) { + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(op_context.input->type), + op_context.input->type); + return kTfLiteError; + } + + TF_LITE_ENSURE_EQ(context, op_context.input->bytes, op_context.output->bytes); + memcpy(op_context.output->data.raw, op_context.input->data.raw, + op_context.input->bytes); + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_SQUEEZE() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.cc index 764fdc1b..cd22e31b 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tensorflow/lite/micro/kernels/svdf.h" + #include #include "tensorflow/lite/c/builtin_op_data.h" @@ -29,496 +31,44 @@ limitations under the License. namespace tflite { namespace { -struct OpData { - int32_t effective_scale_1_a; - int32_t effective_scale_2_a; - // b versions of each scale are kept at int since the numbers are just the - // shift value - typically between [-32, 32]. - int effective_scale_1_b; - int effective_scale_2_b; - int scratch_tensor_index; - int scratch_output_tensor_index; - - // Cached tensor zero point values for quantized operations. - int input_zero_point; - int output_zero_point; -}; - -// Input tensors. -constexpr int kInputTensor = 0; -constexpr int kWeightsFeatureTensor = 1; -constexpr int kWeightsTimeTensor = 2; -constexpr int kBiasTensor = 3; -// This is a variable tensor, and will be modified by this op. -constexpr int kInputActivationStateTensor = 4; - -// Output tensor. -constexpr int kOutputTensor = 0; - -/** - * This version of SVDF is specific to TFLite Micro. It contains the following - * differences between the TFLite version: - * - * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time - * for the Micro interpreter. - * 2.) Output dimensions - the TFLite version determines output size and runtime - * and resizes the output tensor. Micro runtime does not support tensor - * resizing. 
- */ -static inline void ApplyTimeWeightsBiasAndActivation( - int batch_size, int memory_size, int num_filters, int num_units, int rank, - const float* const __restrict__ weights_time_ptr, - const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation, - float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr, - float* const __restrict__ output_ptr) { - // Compute matmul(activation_state, weights_time). - for (int b = 0; b < batch_size; ++b) { - // Perform batched vector dot product: - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - const float* vector1_ptr = weights_time_ptr; - const float* vector2_ptr = state_ptr + b * memory_size * num_filters; - for (int i = 0; i < num_filters; ++i) { - *scratch_ptr_batch = 0.f; - for (int j = 0; j < memory_size; ++j) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; - } - scratch_ptr_batch++; - } - } - - // Initialize output with bias if provided. - if (bias_ptr) { - // VectorBatchVectorAssign - for (int i = 0; i < batch_size; ++i) { - float* output_data = output_ptr + i * num_units; - const float* bias_data = bias_ptr; - for (int j = 0; j < num_units; ++j) { - *output_data++ = *bias_data++; - } - } - } else { - float* output_data = output_ptr; - for (int i = 0; i < batch_size * num_units; ++i) { - *output_data++ = 0.0f; - } - } - - // Reduction sum. - for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - - // Reduction sum vector - for (int i = 0; i < num_units; ++i) { - for (int j = 0; j < rank; j++) { - output_ptr_batch[i] += *scratch_ptr_batch++; - } - } - } - - // Apply activation. - for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - for (int i = 0; i < num_units; ++i) { - *output_ptr_batch = - tflite::ops::micro::ActivationValFloat(activation, *output_ptr_batch); - ++output_ptr_batch; - } - } -} - -inline void EvalFloatSVDF( - TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* weights_feature, - const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, - const TfLiteSVDFParams* params, int scratch_tensor_index, - TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output) { - const int rank = params->rank; - const int batch_size = input->dims->data[0]; - const int input_size = input->dims->data[1]; - const int num_filters = weights_feature->dims->data[0]; - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - const float* weights_feature_ptr = - tflite::micro::GetTensorData(weights_feature); - const float* weights_time_ptr = - tflite::micro::GetTensorData(weights_time); - const float* bias_ptr = tflite::micro::GetTensorData(bias); - const float* input_ptr = tflite::micro::GetTensorData(input); - - float* state_ptr = tflite::micro::GetTensorData(activation_state); - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - float* scratch_ptr = static_cast( - context->GetScratchBuffer(context, scratch_tensor_index)); - - float* output_ptr = tflite::micro::GetTensorData(output); - - // Left shift the activation_state. 
- { - float* new_state_start = state_ptr; - const float* old_state_start = state_ptr + 1; - const float* old_state_end = - state_ptr + batch_size * num_filters * memory_size; - while (old_state_start != old_state_end) { - *new_state_start++ = *old_state_start++; - } - } - - // Note: no need to clear the latest activation, matmul is not accumulative. - - // Compute conv1d(inputs, weights_feature). - // The activation_state's rightmost column is used to save current cycle - // activation. This is achieved by starting at state_ptr[memory_size - 1] and - // having the stride equal to memory_size. - - // Perform batched matrix vector multiply operation: - { - const float* matrix = weights_feature_ptr; - const float* vector = input_ptr; - float* result = &state_ptr[memory_size - 1]; - float* result_in_batch = result; - for (int i = 0; i < batch_size; ++i) { - const float* matrix_ptr = matrix; - for (int j = 0; j < num_filters; ++j) { - float dot_prod = 0.0f; - const float* vector_in_batch = vector + i * input_size; - for (int k = 0; k < input_size; ++k) { - dot_prod += *matrix_ptr++ * *vector_in_batch++; - } - *result_in_batch = dot_prod; - result_in_batch += memory_size; - } - } - } - - ApplyTimeWeightsBiasAndActivation( - batch_size, memory_size, num_filters, num_units, rank, weights_time_ptr, - bias_ptr, params->activation, state_ptr, scratch_ptr, output_ptr); -} - -void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input_tensor, - const TfLiteEvalTensor* weights_feature_tensor, - const TfLiteEvalTensor* weights_time_tensor, - const TfLiteEvalTensor* bias_tensor, - const TfLiteSVDFParams* params, - TfLiteEvalTensor* activation_state_tensor, - TfLiteEvalTensor* output_tensor, const OpData& data) { - const int n_rank = params->rank; - const int n_batch = input_tensor->dims->data[0]; - const int n_input = input_tensor->dims->data[1]; - const int n_filter = weights_feature_tensor->dims->data[0]; - const int n_unit = n_filter / n_rank; - const int n_memory = weights_time_tensor->dims->data[1]; - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - int32_t* scratch_tensor = static_cast( - context->GetScratchBuffer(context, data.scratch_tensor_index)); - int32_t* scratch_output_tensor = static_cast( - context->GetScratchBuffer(context, data.scratch_output_tensor_index)); - - // Shift states. - int16_t* const state_ptr = - tflite::micro::GetTensorData(activation_state_tensor); - - // Left shift the activation_state. - { - int16_t* new_state_start = state_ptr; - const int16_t* old_state_start = state_ptr + 1; - const int16_t* old_state_end = state_ptr + n_batch * n_filter * n_memory; - while (old_state_start != old_state_end) { - *new_state_start++ = *old_state_start++; - } - } - - // Note: no need to clear the latest activation, matmul is not accumulative. - - // Feature matmul. 
- { - int16_t* state = - tflite::micro::GetTensorData(activation_state_tensor); - const int8_t* input = tflite::micro::GetTensorData(input_tensor); - const int8_t* weight_feature = - tflite::micro::GetTensorData(weights_feature_tensor); - const int32_t output_max = std::numeric_limits::max(); - const int32_t output_min = std::numeric_limits::min(); - int16_t* result_in_batch = state + (n_memory - 1); - for (int b = 0; b < n_batch; b++) { - const int8_t* matrix_ptr = weight_feature; - for (int r = 0; r < n_filter; r++) { - int32_t dot_prod = 0; - const int8_t* vector_in_batch = input + b * n_input; - for (int c = 0; c < n_input; c++) { - dot_prod += - *matrix_ptr++ * (*vector_in_batch++ - data.input_zero_point); - } - dot_prod = MultiplyByQuantizedMultiplier( - dot_prod, data.effective_scale_1_a, data.effective_scale_1_b); - dot_prod = std::min(std::max(output_min, dot_prod), output_max); - // This assumes state is symmetrically quantized. Otherwise last bit of - // state should be initialized to its zero point and accumulate the - // dot_prod. - // Equivalent as the following: - // result_in_batch = zero point, which happens to be zero. - // result_in_batch += dot_prod_56. - *result_in_batch = dot_prod; - result_in_batch += n_memory; - } - } - } - - // Time. - { - for (int b = 0; b < n_batch; ++b) { - int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; - - // Perform batched vector dot product: - const int16_t* vector1_ptr = - tflite::micro::GetTensorData(weights_time_tensor); - const int16_t* vector2_ptr = - tflite::micro::GetTensorData(activation_state_tensor) + - b * n_memory * n_filter; - - for (int i = 0; i < n_filter; i++) { - *scratch_ptr_batch = 0; - for (int j = 0; j < n_memory; j++) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; - } - scratch_ptr_batch++; - } - } - } - - // Reduce, add bias, rescale, activation. - { - // Add bias. - if (bias_tensor) { - // Vector batch assign: - const int32_t* bias_data = - tflite::micro::GetTensorData(bias_tensor); - for (int i = 0; i < n_batch; ++i) { - int32_t* output_ptr = scratch_output_tensor + i * n_unit; - const int32_t* bias_ptr = bias_data; - for (int j = 0; j < n_unit; ++j) { - *output_ptr++ = *bias_ptr++; - } - } - } else { - int32_t* output_ptr = scratch_output_tensor; - for (int i = 0; i < n_batch * n_unit; ++i) { - *output_ptr++ = 0; - } - } - - // Reduce. - for (int b = 0; b < n_batch; ++b) { - int32_t* output_temp_ptr = scratch_output_tensor + b * n_unit; - int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; - - // Reduction sum vector - for (int i = 0; i < n_unit; ++i) { - for (int j = 0; j < n_rank; ++j) { - output_temp_ptr[i] += *scratch_ptr_batch++; - } - } - } - - // Rescale. 
- const int32_t output_max = std::numeric_limits::max(); - const int32_t output_min = std::numeric_limits::min(); - for (int i = 0; i < n_batch * n_unit; ++i) { - int32_t x1 = scratch_output_tensor[i]; - int32_t x2 = MultiplyByQuantizedMultiplier(x1, data.effective_scale_2_a, - data.effective_scale_2_b); - int32_t x3 = x2 + data.output_zero_point; - int32_t x4 = std::min(std::max(output_min, x3), output_max); - tflite::micro::GetTensorData(output_tensor)[i] = - static_cast(x4); - } - } -} - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - - const auto* params = static_cast(node->builtin_data); - - // Validate Tensor Inputs (dtype depends on quantization): - // [0] = Input, {2, batch_size, input_size} - // [1] = Weights Feature, {2, num_filters, input_size} - // [2] = Weights Time, {2, num_filters, memory_size} - // [3] = Bias (optional), {1, num_units} - // [4] = Activation State (variable), - // {2, batch_size, memory_size * num_filters} - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* weights_feature = - GetInput(context, node, kWeightsFeatureTensor); - TF_LITE_ENSURE(context, weights_feature != nullptr); - const TfLiteTensor* weights_time = - GetInput(context, node, kWeightsTimeTensor); - TF_LITE_ENSURE(context, weights_time != nullptr); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - const TfLiteTensor* activation_state = - GetInput(context, node, kInputActivationStateTensor); - TF_LITE_ENSURE(context, activation_state != nullptr); - - // Define input constants based on input tensor definition above: - const int rank = params->rank; - const int input_size = input->dims->data[1]; - const int batch_size = input->dims->data[0]; - const int num_filters = weights_feature->dims->data[0]; - TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - // Validate Input Tensor: - TF_LITE_ENSURE(context, - input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); - - // Validate Tensor Output: - // [0] = float/int8_t, {2, batch_size, num_units} - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); - TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); - - // Validate Weights Feature Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); - TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); - - // Validate Weights Time Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size); - - // Validate Optional Bias Input Tensor: - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); - } - - // Validate Activation State Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); - 
TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], - memory_size * num_filters); - // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. - TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); - - TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteInt16); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); - } - - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - - const double effective_scale_1 = static_cast( - input->params.scale * weights_feature->params.scale / - activation_state->params.scale); - const double effective_scale_2 = - static_cast(activation_state->params.scale * - weights_time->params.scale / output->params.scale); - - // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. - TF_LITE_ENSURE( - context, - std::abs(static_cast(bias->params.scale) - - static_cast(activation_state->params.scale * - weights_time->params.scale)) < 1e-5); - - QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), - &(data->effective_scale_1_b)); - QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), - &(data->effective_scale_2_b)); - - data->input_zero_point = input->params.zero_point; - data->output_zero_point = output->params.zero_point; - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(int32_t), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - - const TfLiteStatus scratch_output_status = - context->RequestScratchBufferInArena( - context, batch_size * num_units * sizeof(int32_t), - &(data->scratch_output_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_output_status); - } else { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); - } - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(float), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - } - - return kTfLiteOk; -} - TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast(node->builtin_data); TFLITE_DCHECK(node->user_data != nullptr); const OpData& data = *(static_cast(node->user_data)); const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); const TfLiteEvalTensor* weights_feature = - tflite::micro::GetEvalInput(context, node, kWeightsFeatureTensor); + tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); const TfLiteEvalTensor* weights_time = - tflite::micro::GetEvalInput(context, node, kWeightsTimeTensor); + 
tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); const TfLiteEvalTensor* bias = (NumInputs(node) == 5) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + ? tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) : nullptr; TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( - context, node, kInputActivationStateTensor); + context, node, kSvdfInputActivationStateTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); switch (weights_feature->type) { case kTfLiteFloat32: { - EvalFloatSVDF(context, node, input, weights_feature, weights_time, bias, - params, data.scratch_tensor_index, activation_state, - output); + EvalFloatSvdfReference( + context, node, input, weights_feature, weights_time, bias, params, + data.scratch_tensor_index, activation_state, output); return kTfLiteOk; break; } case kTfLiteInt8: { - EvalIntegerSVDF(context, node, input, weights_feature, weights_time, bias, - params, activation_state, output, data); + EvalIntegerSvdfReference(context, node, input, weights_feature, + weights_time, bias, params, activation_state, + output, data); return kTfLiteOk; break; } @@ -536,7 +86,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteRegistration Register_SVDF() { return {/*init=*/Init, /*free=*/nullptr, - /*prepare=*/Prepare, + /*prepare=*/PrepareSvdf, /*invoke=*/Eval, /*profiling_string=*/nullptr, /*builtin_code=*/0, diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.h b/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.h new file mode 100644 index 00000000..d04787be --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf.h @@ -0,0 +1,71 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" + +namespace tflite { + +struct OpData { + int32_t effective_scale_1_a; + int32_t effective_scale_2_a; + // b versions of each scale are kept at int since the numbers are just the + // shift value - typically between [-32, 32]. + int effective_scale_1_b; + int effective_scale_2_b; + int scratch_tensor_index; + int scratch_output_tensor_index; + + // Cached tensor zero point values for quantized operations. + int input_zero_point; + int output_zero_point; +}; + +// Input tensors. +extern const int kSvdfInputTensor; +extern const int kSvdfWeightsFeatureTensor; +extern const int kSvdfWeightsTimeTensor; +extern const int kSvdfBiasTensor; +// This is a variable tensor, and will be modified by this op. +extern const int kSvdfInputActivationStateTensor; + +// Output tensor. 
+extern const int kSvdfOutputTensor; + +// TensorflowLite Micro-specific reference implementation for Integer SVDF. +void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpData& data); + +void EvalFloatSvdfReference( + TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* weights_feature, + const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, + const TfLiteSVDFParams* params, int scratch_tensor_index, + TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output); + +TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf_common.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf_common.cc new file mode 100644 index 00000000..12e697b1 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/svdf_common.cc @@ -0,0 +1,469 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/op_macros.h" +#include "tensorflow/lite/micro/kernels/activation_utils.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/svdf.h" +#include "tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +/** + * This version of SVDF is specific to TFLite Micro. It contains the following + * differences between the TFLite version: + * + * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time + * for the Micro interpreter. + * 2.) Output dimensions - the TFLite version determines output size and runtime + * and resizes the output tensor. Micro runtime does not support tensor + * resizing. + */ + +const int kSvdfInputTensor = 0; +const int kSvdfWeightsFeatureTensor = 1; +const int kSvdfWeightsTimeTensor = 2; +const int kSvdfBiasTensor = 3; +const int kSvdfInputActivationStateTensor = + 4; // This is a variable tensor, and will be modified by this op. 
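+// Note: these indices are defined out-of-line here (and declared extern in
+// svdf.h), presumably so that optimized platform-specific SVDF kernels can
+// reuse them alongside this reference implementation.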
+const int kSvdfOutputTensor = 0; + +void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpData& data) { + const int n_rank = params->rank; + const int n_batch = input_tensor->dims->data[0]; + const int n_input = input_tensor->dims->data[1]; + const int n_filter = weights_feature_tensor->dims->data[0]; + const int n_unit = n_filter / n_rank; + const int n_memory = weights_time_tensor->dims->data[1]; + + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + int32_t* scratch_tensor = static_cast( + context->GetScratchBuffer(context, data.scratch_tensor_index)); + int32_t* scratch_output_tensor = static_cast( + context->GetScratchBuffer(context, data.scratch_output_tensor_index)); + + // Shift states. + int16_t* const state_ptr = + tflite::micro::GetTensorData(activation_state_tensor); + + // Left shift the activation_state. + { + int16_t* new_state_start = state_ptr; + const int16_t* old_state_start = state_ptr + 1; + const int16_t* old_state_end = state_ptr + n_batch * n_filter * n_memory; + while (old_state_start != old_state_end) { + *new_state_start++ = *old_state_start++; + } + } + + // Note: no need to clear the latest activation, matmul is not accumulative. + + // Feature matmul. + { + int16_t* state = + tflite::micro::GetTensorData(activation_state_tensor); + const int8_t* input = tflite::micro::GetTensorData(input_tensor); + const int8_t* weight_feature = + tflite::micro::GetTensorData(weights_feature_tensor); + const int32_t output_max = std::numeric_limits::max(); + const int32_t output_min = std::numeric_limits::min(); + int16_t* result_in_batch = state + (n_memory - 1); + for (int b = 0; b < n_batch; b++) { + const int8_t* matrix_ptr = weight_feature; + for (int r = 0; r < n_filter; r++) { + int32_t dot_prod = 0; + const int8_t* vector_in_batch = input + b * n_input; + for (int c = 0; c < n_input; c++) { + dot_prod += + *matrix_ptr++ * (*vector_in_batch++ - data.input_zero_point); + } + dot_prod = MultiplyByQuantizedMultiplier( + dot_prod, data.effective_scale_1_a, data.effective_scale_1_b); + dot_prod = std::min(std::max(output_min, dot_prod), output_max); + // This assumes state is symmetrically quantized. Otherwise last bit of + // state should be initialized to its zero point and accumulate the + // dot_prod. + // Equivalent as the following: + // result_in_batch = zero point, which happens to be zero. + // result_in_batch += dot_prod_56. + *result_in_batch = dot_prod; + result_in_batch += n_memory; + } + } + } + + // Time. + { + for (int b = 0; b < n_batch; ++b) { + int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; + + // Perform batched vector dot product: + const int16_t* vector1_ptr = + tflite::micro::GetTensorData(weights_time_tensor); + const int16_t* vector2_ptr = + tflite::micro::GetTensorData(activation_state_tensor) + + b * n_memory * n_filter; + + for (int i = 0; i < n_filter; i++) { + *scratch_ptr_batch = 0; + for (int j = 0; j < n_memory; j++) { + *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; + } + scratch_ptr_batch++; + } + } + } + + // Reduce, add bias, rescale, activation. + { + // Add bias. 
+ if (bias_tensor) { + // Vector batch assign: + const int32_t* bias_data = + tflite::micro::GetTensorData(bias_tensor); + for (int i = 0; i < n_batch; ++i) { + int32_t* output_ptr = scratch_output_tensor + i * n_unit; + const int32_t* bias_ptr = bias_data; + for (int j = 0; j < n_unit; ++j) { + *output_ptr++ = *bias_ptr++; + } + } + } else { + int32_t* output_ptr = scratch_output_tensor; + for (int i = 0; i < n_batch * n_unit; ++i) { + *output_ptr++ = 0; + } + } + + // Reduce. + for (int b = 0; b < n_batch; ++b) { + int32_t* output_temp_ptr = scratch_output_tensor + b * n_unit; + int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; + + // Reduction sum vector + for (int i = 0; i < n_unit; ++i) { + for (int j = 0; j < n_rank; ++j) { + output_temp_ptr[i] += *scratch_ptr_batch++; + } + } + } + + // Rescale. + const int32_t output_max = std::numeric_limits::max(); + const int32_t output_min = std::numeric_limits::min(); + for (int i = 0; i < n_batch * n_unit; ++i) { + int32_t x1 = scratch_output_tensor[i]; + int32_t x2 = MultiplyByQuantizedMultiplier(x1, data.effective_scale_2_a, + data.effective_scale_2_b); + int32_t x3 = x2 + data.output_zero_point; + int32_t x4 = std::min(std::max(output_min, x3), output_max); + tflite::micro::GetTensorData(output_tensor)[i] = + static_cast(x4); + } + } +} +static inline void ApplyTimeWeightsBiasAndActivation( + int batch_size, int memory_size, int num_filters, int num_units, int rank, + const float* const __restrict__ weights_time_ptr, + const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation, + float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr, + float* const __restrict__ output_ptr) { + // Compute matmul(activation_state, weights_time). + for (int b = 0; b < batch_size; ++b) { + // Perform batched vector dot product: + float* scratch_ptr_batch = scratch_ptr + b * num_filters; + const float* vector1_ptr = weights_time_ptr; + const float* vector2_ptr = state_ptr + b * memory_size * num_filters; + for (int i = 0; i < num_filters; ++i) { + *scratch_ptr_batch = 0.f; + for (int j = 0; j < memory_size; ++j) { + *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; + } + scratch_ptr_batch++; + } + } + + // Initialize output with bias if provided. + if (bias_ptr) { + // VectorBatchVectorAssign + for (int i = 0; i < batch_size; ++i) { + float* output_data = output_ptr + i * num_units; + const float* bias_data = bias_ptr; + for (int j = 0; j < num_units; ++j) { + *output_data++ = *bias_data++; + } + } + } else { + float* output_data = output_ptr; + for (int i = 0; i < batch_size * num_units; ++i) { + *output_data++ = 0.0f; + } + } + + // Reduction sum. + for (int b = 0; b < batch_size; ++b) { + float* output_ptr_batch = output_ptr + b * num_units; + float* scratch_ptr_batch = scratch_ptr + b * num_filters; + + // Reduction sum vector + for (int i = 0; i < num_units; ++i) { + for (int j = 0; j < rank; j++) { + output_ptr_batch[i] += *scratch_ptr_batch++; + } + } + } + + // Apply activation. 
+ for (int b = 0; b < batch_size; ++b) { + float* output_ptr_batch = output_ptr + b * num_units; + for (int i = 0; i < num_units; ++i) { + *output_ptr_batch = + tflite::ops::micro::ActivationValFloat(activation, *output_ptr_batch); + ++output_ptr_batch; + } + } +} + +void EvalFloatSvdfReference( + TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* weights_feature, + const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, + const TfLiteSVDFParams* params, int scratch_tensor_index, + TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output) { + const int rank = params->rank; + const int batch_size = input->dims->data[0]; + const int input_size = input->dims->data[1]; + const int num_filters = weights_feature->dims->data[0]; + const int num_units = num_filters / rank; + const int memory_size = weights_time->dims->data[1]; + + const float* weights_feature_ptr = + tflite::micro::GetTensorData(weights_feature); + const float* weights_time_ptr = + tflite::micro::GetTensorData(weights_time); + const float* bias_ptr = tflite::micro::GetTensorData(bias); + const float* input_ptr = tflite::micro::GetTensorData(input); + + float* state_ptr = tflite::micro::GetTensorData(activation_state); + + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + float* scratch_ptr = static_cast( + context->GetScratchBuffer(context, scratch_tensor_index)); + + float* output_ptr = tflite::micro::GetTensorData(output); + + // Left shift the activation_state. + { + float* new_state_start = state_ptr; + const float* old_state_start = state_ptr + 1; + const float* old_state_end = + state_ptr + batch_size * num_filters * memory_size; + while (old_state_start != old_state_end) { + *new_state_start++ = *old_state_start++; + } + } + + // Note: no need to clear the latest activation, matmul is not accumulative. + + // Compute conv1d(inputs, weights_feature). + // The activation_state's rightmost column is used to save current cycle + // activation. This is achieved by starting at state_ptr[memory_size - 1] and + // having the stride equal to memory_size. 
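+  // Illustrative example (hypothetical sizes): with memory_size = 10, the
+  // result for batch 0 / filter 0 lands in state_ptr[9], filter 1 in
+  // state_ptr[19], and so on, i.e. always the last slot of each filter's
+  // history window.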
+ + // Perform batched matrix vector multiply operation: + { + const float* matrix = weights_feature_ptr; + const float* vector = input_ptr; + float* result = &state_ptr[memory_size - 1]; + float* result_in_batch = result; + for (int i = 0; i < batch_size; ++i) { + const float* matrix_ptr = matrix; + for (int j = 0; j < num_filters; ++j) { + float dot_prod = 0.0f; + const float* vector_in_batch = vector + i * input_size; + for (int k = 0; k < input_size; ++k) { + dot_prod += *matrix_ptr++ * *vector_in_batch++; + } + *result_in_batch = dot_prod; + result_in_batch += memory_size; + } + } + } + + ApplyTimeWeightsBiasAndActivation( + batch_size, memory_size, num_filters, num_units, rank, weights_time_ptr, + bias_ptr, params->activation, state_ptr, scratch_ptr, output_ptr); +} + +TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto* params = static_cast(node->builtin_data); + + // Validate Tensor Inputs (dtype depends on quantization): + // [0] = Input, {2, batch_size, input_size} + // [1] = Weights Feature, {2, num_filters, input_size} + // [2] = Weights Time, {2, num_filters, memory_size} + // [3] = Bias (optional), {1, num_units} + // [4] = Activation State (variable), + // {2, batch_size, memory_size * num_filters} + const TfLiteTensor* input = GetInput(context, node, kSvdfInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* weights_feature = + GetInput(context, node, kSvdfWeightsFeatureTensor); + TF_LITE_ENSURE(context, weights_feature != nullptr); + const TfLiteTensor* weights_time = + GetInput(context, node, kSvdfWeightsTimeTensor); + TF_LITE_ENSURE(context, weights_time != nullptr); + const TfLiteTensor* bias = + GetOptionalInputTensor(context, node, kSvdfBiasTensor); + const TfLiteTensor* activation_state = + GetInput(context, node, kSvdfInputActivationStateTensor); + TF_LITE_ENSURE(context, activation_state != nullptr); + + // Define input constants based on input tensor definition above: + const int rank = params->rank; + const int input_size = input->dims->data[1]; + const int batch_size = input->dims->data[0]; + const int num_filters = weights_feature->dims->data[0]; + TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); + const int num_units = num_filters / rank; + const int memory_size = weights_time->dims->data[1]; + + // Validate Input Tensor: + TF_LITE_ENSURE(context, + input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); + + // Validate Tensor Output: + // [0] = float/int8_t, {2, batch_size, num_units} + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + TfLiteTensor* output = GetOutput(context, node, kSvdfOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); + TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); + TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); + + // Validate Weights Feature Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); + TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); + + // Validate Weights Time Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); + TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); + TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size); + + // Validate Optional Bias Input Tensor: + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); + } + 
+ // Validate Activation State Input Tensor: + TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); + TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); + TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], + memory_size * num_filters); + // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. + TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); + + TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); + + TFLITE_DCHECK(node->user_data != nullptr); + OpData* data = static_cast(node->user_data); + + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteInt16); + TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteInt16); + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); + } + + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); + + const double effective_scale_1 = static_cast( + input->params.scale * weights_feature->params.scale / + activation_state->params.scale); + const double effective_scale_2 = + static_cast(activation_state->params.scale * + weights_time->params.scale / output->params.scale); + + // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. + TF_LITE_ENSURE( + context, + std::abs(static_cast(bias->params.scale) - + static_cast(activation_state->params.scale * + weights_time->params.scale)) < 1e-5); + + QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), + &(data->effective_scale_1_b)); + QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), + &(data->effective_scale_2_b)); + + data->input_zero_point = input->params.zero_point; + data->output_zero_point = output->params.zero_point; + + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + + const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( + context, batch_size * num_filters * sizeof(int32_t), + &(data->scratch_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_status); + + const TfLiteStatus scratch_output_status = + context->RequestScratchBufferInArena( + context, batch_size * num_units * sizeof(int32_t), + &(data->scratch_output_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_output_status); + } else { + TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); + TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); + TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); + if (bias != nullptr) { + TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); + } + TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); + + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( + context, batch_size * num_filters * sizeof(float), + &(data->scratch_tensor_index)); + TF_LITE_ENSURE_OK(context, scratch_status); + } + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/transpose_conv.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/transpose_conv.cc new file mode 100644 index 00000000..c49a9980 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/transpose_conv.cc @@ -0,0 +1,269 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h" + +#include "tensorflow/lite/c/builtin_op_data.h" +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/common.h" +#include "tensorflow/lite/kernels/internal/quantization_util.h" +#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/kernels/padding.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +// For the TfLite transpose_conv implementation, input tensor 0 corresponds to +// the OutputShapeTensor. However, since TFLM does not support dynamic tensors, +// the TFLM implementation ignores input tensor 0 and the only inputs we care +// about are kFilterTensor, kInputTensor and kBiasTensor. +constexpr int kFilterTensor = 1; +constexpr int kInputTensor = 2; +constexpr int kBiasTensor = 3; +constexpr int kOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kConvQuantizedDimension = 0; + +struct OpData { + ConvParams params; + + // A scratch buffer is required for quantized implementations. + int scratch_buffer_index; + + // Multiplier and shift arrays are required for the int8 implementation. + int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; +}; + +inline PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams* params, int width, + int height, int filter_width, int filter_height, + int out_width, int out_height, + const TfLiteType data_type, OpData* data) { + bool has_bias = node->inputs->size == 4; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 3); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params->padding; + TfLitePaddingValues padding_values = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + params->dilation_height_factor, params->dilation_width_factor, height, + width, filter_height, filter_width, padding, &out_height, &out_width); + + data->params.padding_type = RuntimePaddingType(padding); + data->params.padding_values.width = padding_values.width; + data->params.padding_values.height = padding_values.height; + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
+ if (data_type != kTfLiteFloat32) { + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); + TF_LITE_ENSURE(context, filter != nullptr); + const TfLiteTensor* bias = + GetOptionalInputTensor(context, node, kBiasTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + int output_channels = filter->dims->data[kConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params->activation, + &data->params.output_multiplier, &data->params.output_shift, + &data->params.quantized_activation_min, + &data->params.quantized_activation_max, + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + output_channels)); + } + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = static_cast(node->builtin_data); + + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + int input_width = input->dims->data[2]; + int input_height = input->dims->data[1]; + int filter_width = filter->dims->data[2]; + int filter_height = filter->dims->data[1]; + int output_width = output->dims->data[2]; + int output_height = output->dims->data[1]; + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // Quantized kernels use an int32 scratch buffer. + if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + TFLITE_DCHECK(context->RequestScratchBufferInArena( + context, + GetTensorShape(output).FlatSize() * sizeof(int32_t), + &(data->scratch_buffer_index)) == kTfLiteOk); + } + + // All per-channel quantized tensors need valid zero point and scale arrays. 
+ if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpData( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, data)); + + // Offsets (zero points) + data->params.input_offset = -input->params.zero_point; + data->params.weights_offset = -filter->params.zero_point; + data->params.output_offset = output->params.zero_point; + + // Stride + dilation + data->params.stride_width = params->stride_width; + data->params.stride_height = params->stride_height; + data->params.dilation_width_factor = params->dilation_width_factor; + data->params.dilation_height_factor = params->dilation_height_factor; + + float output_activation_min, output_activation_max; + CalculateActivationRange(params->activation, &output_activation_min, + &output_activation_max); + data->params.float_activation_min = output_activation_min; + data->params.float_activation_max = output_activation_max; + return kTfLiteOk; +} // namespace conv + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFilterTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 4) + ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, input->type == filter->type, + "Hybrid models are not supported on TFLite Micro."); + + switch (input->type) { // Already know in/out types are same. 
+ case kTfLiteFloat32: { + reference_ops::TransposeConv( + data.params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); + break; + } + case kTfLiteInt8: { + int32_t* scratch_buffer = static_cast( + context->GetScratchBuffer(context, data.scratch_buffer_index)); + reference_integer_ops::TransposeConv( + data.params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); + break; + } + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_TRANSPOSE_CONV() { + return {/*init=*/Init, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/zeros_like.cc b/code/components/tfmicro/tensorflow/lite/micro/kernels/zeros_like.cc new file mode 100644 index 00000000..ce403927 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/kernels/zeros_like.cc @@ -0,0 +1,89 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "tensorflow/lite/kernels/kernel_util.h" +#include "tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + output->type = input->type; + + return kTfLiteOk; +} + +template +void resetZeros(T* out, const int num_elements) { + for (int i = 0; i < num_elements; ++i) { + out[i] = static_cast(0); + } +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output)); + switch (input->type) { + case kTfLiteInt64: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + case kTfLiteInt32: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + case kTfLiteInt8: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + case kTfLiteFloat32: + resetZeros(tflite::micro::GetTensorData(output), flat_size); + break; + default: + TF_LITE_KERNEL_LOG(context, + "ZerosLike only currently supports int64, int32, " + "and float32, got %d.", + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_ZEROS_LIKE() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/memory_helpers.cc b/code/components/tfmicro/tensorflow/lite/micro/memory_helpers.cc index c6180cb4..2d8f7597 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/memory_helpers.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/memory_helpers.cc @@ -48,15 +48,24 @@ size_t AlignSizeUp(size_t size, size_t alignment) { TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { switch (type) { + case kTfLiteFloat16: + *size = sizeof(int16_t); + break; case kTfLiteFloat32: *size = sizeof(float); break; + case kTfLiteFloat64: + *size = sizeof(double); + break; case kTfLiteInt16: *size = sizeof(int16_t); break; case kTfLiteInt32: *size = sizeof(int32_t); break; + case kTfLiteUInt32: + *size = sizeof(uint32_t); + break; case kTfLiteUInt8: *size = sizeof(uint8_t); break; @@ -66,6 +75,9 @@ TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { case kTfLiteInt64: *size = sizeof(int64_t); break; + case kTfLiteUInt64: + *size = sizeof(uint64_t); + break; case kTfLiteBool: *size = sizeof(bool); break; diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_allocator.cc b/code/components/tfmicro/tensorflow/lite/micro/micro_allocator.cc index 675a64d2..fb547279 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_allocator.cc +++ 
b/code/components/tfmicro/tensorflow/lite/micro/micro_allocator.cc @@ -40,7 +40,7 @@ namespace { // Maximum number of scratch buffer requests per operator. Operator kernels that // request more than this value will receive an exception. -constexpr size_t kMaxScratchBuffersPerOp = 8; +constexpr size_t kMaxScratchBuffersPerOp = 12; // Sentinel value used as a placeholder to mark a ScratchBufferRequest request // needs a node id assignment. diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc b/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc index 6d8361cd..5aba058d 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.cc @@ -16,16 +16,20 @@ limitations under the License. #include "tensorflow/lite/micro/micro_error_reporter.h" #include +#include +#include -#ifndef TF_LITE_STRIP_ERROR_STRINGS +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) #include "tensorflow/lite/micro/debug_log.h" #include "tensorflow/lite/micro/micro_string.h" #endif -namespace tflite { +namespace { +uint8_t micro_error_reporter_buffer[sizeof(tflite::MicroErrorReporter)]; +tflite::MicroErrorReporter* error_reporter_ = nullptr; -int MicroErrorReporter::Report(const char* format, va_list args) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS +void Log(const char* format, va_list args) { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) // Only pulling in the implementation of this function for builds where we // expect to make use of it to be extra cautious about not increasing the code // size. @@ -35,6 +39,29 @@ int MicroErrorReporter::Report(const char* format, va_list args) { DebugLog(log_buffer); DebugLog("\r\n"); #endif +} + +} // namespace + +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +void MicroPrintf(const char* format, ...) { + va_list args; + va_start(args, format); + Log(format, args); + va_end(args); +} +#endif + +namespace tflite { +ErrorReporter* GetMicroErrorReporter() { + if (error_reporter_ == nullptr) { + error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter(); + } + return error_reporter_; +} + +int MicroErrorReporter::Report(const char* format, va_list args) { + Log(format, args); return 0; } diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.h b/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.h index e2c073a4..ac45224a 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.h +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_error_reporter.h @@ -20,8 +20,21 @@ limitations under the License. #include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/micro/compatibility.h" +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +// This function can be used independent of the MicroErrorReporter to get +// printf-like functionalitys and are common to all target platforms. +void MicroPrintf(const char* format, ...); +#else +// We use a #define to ensure that the strings are completely stripped, to +// prevent an unnecessary increase in the binary size. +#define MicroPrintf(format, ...) +#endif + namespace tflite { +// Get a pointer to a singleton global error reporter. 
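+// The instance is constructed with placement new in a static buffer on first
+// use (see micro_error_reporter.cc above), so no heap allocation is involved.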
+ErrorReporter* GetMicroErrorReporter(); + class MicroErrorReporter : public ErrorReporter { public: ~MicroErrorReporter() override {} diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.cc b/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.cc index 8b003d8b..f01ed641 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.cc @@ -24,6 +24,7 @@ limitations under the License. #include "tensorflow/lite/core/api/tensor_utils.h" #include "tensorflow/lite/micro/memory_helpers.h" #include "tensorflow/lite/micro/micro_allocator.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_op_resolver.h" #include "tensorflow/lite/micro/micro_profiler.h" #include "tensorflow/lite/schema/schema_generated.h" @@ -108,7 +109,7 @@ MicroInterpreter::MicroInterpreter(const Model* model, uint8_t* tensor_arena, size_t tensor_arena_size, ErrorReporter* error_reporter, - tflite::Profiler* profiler) + MicroProfiler* profiler) : model_(model), op_resolver_(op_resolver), error_reporter_(error_reporter), @@ -118,8 +119,8 @@ MicroInterpreter::MicroInterpreter(const Model* model, initialization_status_(kTfLiteError), eval_tensors_(nullptr), context_helper_(error_reporter_, &allocator_, model), - input_tensor_(nullptr), - output_tensor_(nullptr) { + input_tensors_(nullptr), + output_tensors_(nullptr) { Init(profiler); } @@ -127,7 +128,7 @@ MicroInterpreter::MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, MicroAllocator* allocator, ErrorReporter* error_reporter, - tflite::Profiler* profiler) + MicroProfiler* profiler) : model_(model), op_resolver_(op_resolver), error_reporter_(error_reporter), @@ -136,8 +137,8 @@ MicroInterpreter::MicroInterpreter(const Model* model, initialization_status_(kTfLiteError), eval_tensors_(nullptr), context_helper_(error_reporter_, &allocator_, model), - input_tensor_(nullptr), - output_tensor_(nullptr) { + input_tensors_(nullptr), + output_tensors_(nullptr) { Init(profiler); } @@ -156,7 +157,7 @@ MicroInterpreter::~MicroInterpreter() { } } -void MicroInterpreter::Init(tflite::Profiler* profiler) { +void MicroInterpreter::Init(MicroProfiler* profiler) { const flatbuffers::Vector>* subgraphs = model_->subgraphs(); if (subgraphs->size() != 1) { @@ -177,46 +178,6 @@ void MicroInterpreter::Init(tflite::Profiler* profiler) { initialization_status_ = kTfLiteOk; } -void MicroInterpreter::CorrectTensorEndianness(TfLiteEvalTensor* tensorCorr) { - int32_t tensorSize = 1; - for (int d = 0; d < tensorCorr->dims->size; ++d) - tensorSize *= reinterpret_cast(tensorCorr->dims->data)[d]; - - switch (tensorCorr->type) { - case TfLiteType::kTfLiteFloat32: - CorrectTensorDataEndianness(tensorCorr->data.f, tensorSize); - break; - case TfLiteType::kTfLiteFloat16: - CorrectTensorDataEndianness(tensorCorr->data.f16, tensorSize); - break; - case TfLiteType::kTfLiteInt64: - CorrectTensorDataEndianness(tensorCorr->data.i64, tensorSize); - break; - case TfLiteType::kTfLiteInt32: - CorrectTensorDataEndianness(tensorCorr->data.i32, tensorSize); - break; - case TfLiteType::kTfLiteInt16: - CorrectTensorDataEndianness(tensorCorr->data.i16, tensorSize); - break; - case TfLiteType::kTfLiteComplex64: - CorrectTensorDataEndianness(tensorCorr->data.c64, tensorSize); - break; - case TfLiteType::kTfLiteComplex128: - CorrectTensorDataEndianness(tensorCorr->data.c128, tensorSize); - break; - default: - // Do nothing for other data types. 
- break; - } -} - -template -void MicroInterpreter::CorrectTensorDataEndianness(T* data, int32_t size) { - for (int32_t i = 0; i < size; ++i) { - data[i] = flatbuffers::EndianScalar(data[i]); - } -} - TfLiteStatus MicroInterpreter::AllocateTensors() { if (allocator_.StartModelAllocation(model_, op_resolver_, &node_and_registrations_, @@ -234,28 +195,6 @@ TfLiteStatus MicroInterpreter::AllocateTensors() { context_helper_.SetTfLiteEvalTensors(eval_tensors_); context_.tensors_size = subgraph_->tensors()->size(); - // If the system is big endian then convert weights from the flatbuffer from - // little to big endian on startup so that it does not need to be done during - // inference. - // NOTE: This requires that the flatbuffer is held in memory which can be - // modified by this process. - if (!FLATBUFFERS_LITTLEENDIAN) { - for (size_t t = 0; t < subgraph_->tensors()->size(); ++t) { - if (auto* buffer = - (*model_->buffers())[subgraph_->tensors()->Get(t)->buffer()]) { - // If we've found a buffer, does it have any data? - if (auto* array = buffer->data()) { - // If it has any data, is the data size larger than zero? - if (array->size()) { - // Update the endianness of the corresponding eval tensor since that - // struct holds the buffer used at inference time. - CorrectTensorEndianness(&eval_tensors_[t]); - } - } - } - } - } - // Only allow AllocatePersistentBuffer in Init stage. context_.AllocatePersistentBuffer = context_helper_.AllocatePersistentBuffer; context_.RequestScratchBufferInArena = nullptr; @@ -311,6 +250,54 @@ TfLiteStatus MicroInterpreter::AllocateTensors() { // TODO(b/16157777): Remove this when ContextHelper is rolled into this class. context_helper_.SetScratchBufferHandles(scratch_buffer_handles_); + // TODO(b/162311891): Drop these allocations when the interpreter supports + // handling buffers from TfLiteEvalTensor. + input_tensors_ = + reinterpret_cast(allocator_.AllocatePersistentBuffer( + sizeof(TfLiteTensor*) * inputs_size())); + if (input_tensors_ == nullptr) { + TF_LITE_REPORT_ERROR( + error_reporter_, + "Failed to allocate memory for context->input_tensors_, " + "%d bytes required", + sizeof(TfLiteTensor*) * inputs_size()); + return kTfLiteError; + } + + for (size_t i = 0; i < inputs_size(); ++i) { + input_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( + model_, eval_tensors_, inputs().Get(i)); + if (input_tensors_[i] == nullptr) { + TF_LITE_REPORT_ERROR(error_reporter_, + "Failed to initialize input tensor %d", i); + return kTfLiteError; + } + } + + // TODO(b/162311891): Drop these allocations when the interpreter supports + // handling buffers from TfLiteEvalTensor. 
+ output_tensors_ = + reinterpret_cast(allocator_.AllocatePersistentBuffer( + sizeof(TfLiteTensor*) * outputs_size())); + if (output_tensors_ == nullptr) { + TF_LITE_REPORT_ERROR( + error_reporter_, + "Failed to allocate memory for context->output_tensors_, " + "%d bytes required", + sizeof(TfLiteTensor*) * outputs_size()); + return kTfLiteError; + } + + for (size_t i = 0; i < outputs_size(); ++i) { + output_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( + model_, eval_tensors_, outputs().Get(i)); + if (output_tensors_[i] == nullptr) { + TF_LITE_REPORT_ERROR(error_reporter_, + "Failed to initialize output tensor %d", i); + return kTfLiteError; + } + } + TF_LITE_ENSURE_STATUS(ResetVariableTensors()); tensors_allocated_ = true; @@ -334,35 +321,35 @@ TfLiteStatus MicroInterpreter::Invoke() { auto* node = &(node_and_registrations_[i].node); auto* registration = node_and_registrations_[i].registration; - if (registration->invoke) { - TfLiteStatus invoke_status; -#ifndef NDEBUG // Omit profiler overhead from release builds. - // The case where profiler == nullptr is handled by - // ScopedOperatorProfile. - tflite::Profiler* profiler = - reinterpret_cast(context_.profiler); - ScopedOperatorProfile scoped_profiler( - profiler, OpNameFromRegistration(registration), i); +// This ifdef is needed (even though ScopedMicroProfiler itself is a no-op with +// -DTF_LITE_STRIP_ERROR_STRINGS) because the function OpNameFromRegistration is +// only defined for builds with the error strings. +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + ScopedMicroProfiler scoped_profiler( + OpNameFromRegistration(registration), + reinterpret_cast(context_.profiler)); #endif - invoke_status = registration->invoke(&context_, node); - // All TfLiteTensor structs used in the kernel are allocated from temp - // memory in the allocator. This creates a chain of allocations in the - // temp section. The call below resets the chain of allocations to - // prepare for the next call. - allocator_.ResetTempAllocations(); + TFLITE_DCHECK(registration->invoke); + TfLiteStatus invoke_status = registration->invoke(&context_, node); - if (invoke_status == kTfLiteError) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %d) failed to invoke with status %d", - OpNameFromRegistration(registration), i, invoke_status); - return kTfLiteError; - } else if (invoke_status != kTfLiteOk) { - return invoke_status; - } + // All TfLiteTensor structs used in the kernel are allocated from temp + // memory in the allocator. This creates a chain of allocations in the + // temp section. The call below resets the chain of allocations to + // prepare for the next call. + allocator_.ResetTempAllocations(); + + if (invoke_status == kTfLiteError) { + TF_LITE_REPORT_ERROR( + error_reporter_, + "Node %s (number %d) failed to invoke with status %d", + OpNameFromRegistration(registration), i, invoke_status); + return kTfLiteError; + } else if (invoke_status != kTfLiteOk) { + return invoke_status; } } + return kTfLiteOk; } @@ -374,20 +361,7 @@ TfLiteTensor* MicroInterpreter::input(size_t index) { length); return nullptr; } - if (index != 0) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Input tensors not at index 0 are allocated from the " - "persistent memory arena. 
Repeat calls will cause excess " - "allocation!"); - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - inputs().Get(index)); - } - if (input_tensor_ == nullptr) { - input_tensor_ = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, inputs().Get(index)); - } - return input_tensor_; + return input_tensors_[index]; } TfLiteTensor* MicroInterpreter::output(size_t index) { @@ -398,22 +372,7 @@ TfLiteTensor* MicroInterpreter::output(size_t index) { length); return nullptr; } - if (index != 0) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Output tensors not at index 0 are allocated from the " - "persistent memory arena. Repeat calls will cause excess " - "allocation!"); - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - outputs().Get(index)); - } - if (output_tensor_ == nullptr) { - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. - output_tensor_ = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, outputs().Get(index)); - } - return output_tensor_; + return output_tensors_[index]; } TfLiteTensor* MicroInterpreter::tensor(size_t index) { diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.h b/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.h index 31720c8e..39fb09b2 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.h +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_interpreter.h @@ -21,13 +21,17 @@ limitations under the License. #include "flatbuffers/flatbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/profiler.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/micro/micro_allocator.h" #include "tensorflow/lite/micro/micro_op_resolver.h" +#include "tensorflow/lite/micro/micro_profiler.h" #include "tensorflow/lite/portable_type_to_tflitetype.h" #include "tensorflow/lite/schema/schema_generated.h" +// Copied from tensorflow/lite/version.h to avoid a dependency chain into +// tensorflow/core. +#define TFLITE_SCHEMA_VERSION (3) + namespace tflite { namespace internal { @@ -82,7 +86,7 @@ class MicroInterpreter { MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, uint8_t* tensor_arena, size_t tensor_arena_size, ErrorReporter* error_reporter, - tflite::Profiler* profiler = nullptr); + MicroProfiler* profiler = nullptr); // Create an interpreter instance using an existing MicroAllocator instance. // This constructor should be used when creating an allocator that needs to @@ -91,7 +95,7 @@ class MicroInterpreter { // as long as that of the interpreter object. MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, MicroAllocator* allocator, ErrorReporter* error_reporter, - tflite::Profiler* profiler = nullptr); + MicroProfiler* profiler = nullptr); ~MicroInterpreter(); @@ -175,12 +179,7 @@ class MicroInterpreter { private: // TODO(b/158263161): Consider switching to Create() function to enable better // error reporting during initialization. 
- void Init(tflite::Profiler* profiler); - - void CorrectTensorEndianness(TfLiteEvalTensor* tensorCorr); - - template - void CorrectTensorDataEndianness(T* data, int32_t size); + void Init(MicroProfiler* profiler); NodeAndRegistration* node_and_registrations_ = nullptr; @@ -202,8 +201,8 @@ class MicroInterpreter { // TODO(b/162311891): Clean these pointers up when this class supports buffers // from TfLiteEvalTensor. - TfLiteTensor* input_tensor_; - TfLiteTensor* output_tensor_; + TfLiteTensor** input_tensors_; + TfLiteTensor** output_tensors_; }; } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_mutable_op_resolver.h b/code/components/tfmicro/tensorflow/lite/micro/micro_mutable_op_resolver.h index 0175c8db..44d40342 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_mutable_op_resolver.h +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_mutable_op_resolver.h @@ -24,16 +24,20 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/compatibility.h" +#include "tensorflow/lite/micro/kernels/ethosu.h" #include "tensorflow/lite/micro/kernels/fully_connected.h" #include "tensorflow/lite/micro/kernels/micro_ops.h" #include "tensorflow/lite/micro/micro_op_resolver.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { +TfLiteRegistration* Register_DETECTION_POSTPROCESS(); template class MicroMutableOpResolver : public MicroOpResolver { public: + TF_LITE_REMOVE_VIRTUAL_DELETE + explicit MicroMutableOpResolver(ErrorReporter* error_reporter = nullptr) : error_reporter_(error_reporter) {} @@ -118,6 +122,11 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseAdd); } + TfLiteStatus AddAddN() { + return AddBuiltin(BuiltinOperator_ADD_N, tflite::Register_ADD_N(), + ParseAddN); + } + TfLiteStatus AddArgMax() { return AddBuiltin(BuiltinOperator_ARG_MAX, tflite::ops::micro::Register_ARG_MAX(), ParseArgMax); @@ -134,6 +143,15 @@ class MicroMutableOpResolver : public MicroOpResolver { ParsePool); } + TfLiteStatus AddBatchToSpaceNd() { + return AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, + Register_BATCH_TO_SPACE_ND(), ParseBatchToSpaceNd); + } + + TfLiteStatus AddCast() { + return AddBuiltin(BuiltinOperator_CAST, Register_CAST(), ParseCast); + } + TfLiteStatus AddCeil() { return AddBuiltin(BuiltinOperator_CEIL, tflite::ops::micro::Register_CEIL(), ParseCeil); @@ -170,11 +188,41 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseDequantize); } + TfLiteStatus AddDetectionPostprocess() { + return AddCustom("TFLite_Detection_PostProcess", + tflite::Register_DETECTION_POSTPROCESS()); + } + + TfLiteStatus AddDiv() { + return AddBuiltin(BuiltinOperator_DIV, tflite::Register_DIV(), ParseDiv); + } + + TfLiteStatus AddElu() { + return AddBuiltin(BuiltinOperator_ELU, tflite::Register_ELU(), ParseElu); + } + TfLiteStatus AddEqual() { return AddBuiltin(BuiltinOperator_EQUAL, tflite::ops::micro::Register_EQUAL(), ParseEqual); } + TfLiteStatus AddEthosU() { + TfLiteRegistration* registration = tflite::Register_ETHOSU(); + if (registration) { + return AddCustom(tflite::GetString_ETHOSU(), registration); + } + return kTfLiteOk; + } + + TfLiteStatus AddExp() { + return AddBuiltin(BuiltinOperator_EXP, Register_EXP(), ParseExp); + } + + TfLiteStatus AddExpandDims() { + return AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS(), + ParseExpandDims); + } + TfLiteStatus AddFloor() { return 
AddBuiltin(BuiltinOperator_FLOOR, tflite::ops::micro::Register_FLOOR(), ParseFloor); @@ -209,6 +257,16 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseL2Normalization); } + TfLiteStatus AddL2Pool2D() { + return AddBuiltin(BuiltinOperator_L2_POOL_2D, tflite::Register_L2_POOL_2D(), + ParsePool); + } + + TfLiteStatus AddLeakyRelu() { + return AddBuiltin(BuiltinOperator_LEAKY_RELU, tflite::Register_LEAKY_RELU(), + ParseLeakyRelu); + } + TfLiteStatus AddLess() { return AddBuiltin(BuiltinOperator_LESS, tflite::ops::micro::Register_LESS(), ParseLess); @@ -358,6 +416,11 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseSoftmax); } + TfLiteStatus AddSpaceToBatchNd() { + return AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, + Register_SPACE_TO_BATCH_ND(), ParseSpaceToBatchNd); + } + TfLiteStatus AddSplit() { return AddBuiltin(BuiltinOperator_SPLIT, tflite::ops::micro::Register_SPLIT(), ParseSplit); @@ -368,6 +431,11 @@ class MicroMutableOpResolver : public MicroOpResolver { tflite::ops::micro::Register_SPLIT_V(), ParseSplitV); } + TfLiteStatus AddSqueeze() { + return AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(), + ParseSqueeze); + } + TfLiteStatus AddSqrt() { return AddBuiltin(BuiltinOperator_SQRT, tflite::ops::micro::Register_SQRT(), ParseSqrt); @@ -398,16 +466,24 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseTanh); } + TfLiteStatus AddTransposeConv() { + return AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, + tflite::Register_TRANSPOSE_CONV(), ParseTransposeConv); + } + TfLiteStatus AddUnpack() { return AddBuiltin(BuiltinOperator_UNPACK, tflite::ops::micro::Register_UNPACK(), ParseUnpack); } + TfLiteStatus AddZerosLike() { + return AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE(), + ParseZerosLike); + } + unsigned int GetRegistrationLength() { return registrations_len_; } private: - TF_LITE_REMOVE_VIRTUAL_DELETE - TfLiteStatus AddBuiltin(tflite::BuiltinOperator op, const TfLiteRegistration& registration, MicroOpResolver::BuiltinParseFunction parser) { diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.cc b/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.cc index 83fb9f64..792d8ae0 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.cc @@ -12,31 +12,47 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ - #include "tensorflow/lite/micro/micro_profiler.h" +#include + #include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/lite/micro/micro_error_reporter.h" #include "tensorflow/lite/micro/micro_time.h" namespace tflite { -MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) - : reporter_(reporter) {} +uint32_t MicroProfiler::BeginEvent(const char* tag) { + if (num_events_ == kMaxEvents) { + num_events_ = 0; + } -uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) { - start_time_ = GetCurrentTimeTicks(); - TFLITE_DCHECK(tag != nullptr); - event_tag_ = tag; - return 0; + tags_[num_events_] = tag; + start_ticks_[num_events_] = GetCurrentTimeTicks(); + end_ticks_[num_events_] = start_ticks_[num_events_] - 1; + return num_events_++; } void MicroProfiler::EndEvent(uint32_t event_handle) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - int32_t end_time = GetCurrentTimeTicks(); - TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, - end_time - start_time_); + TFLITE_DCHECK(event_handle < kMaxEvents); + end_ticks_[event_handle] = GetCurrentTimeTicks(); +} + +int32_t MicroProfiler::GetTotalTicks() const { + int32_t ticks = 0; + for (int i = 0; i < num_events_; ++i) { + ticks += end_ticks_[i] - start_ticks_[i]; + } + return ticks; +} + +void MicroProfiler::Log() const { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + for (int i = 0; i < num_events_; ++i) { + int32_t ticks = end_ticks_[i] - start_ticks_[i]; + MicroPrintf("%s took %d ticks (%d ms).", tags_[i], ticks, TicksToMs(ticks)); + } #endif } + } // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.h b/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.h index a3144b3a..a75375be 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.h +++ b/code/components/tfmicro/tensorflow/lite/micro/micro_profiler.h @@ -16,8 +16,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ #define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/profiler.h" +#include + #include "tensorflow/lite/micro/compatibility.h" namespace tflite { @@ -26,46 +26,93 @@ namespace tflite { // performance. Bottleck operators can be identified along with slow code // sections. This can be used in conjunction with running the relevant micro // benchmark to evaluate end-to-end performance. -// -// Usage example: -// MicroProfiler profiler(error_reporter); -// { -// ScopedProfile scoped_profile(profiler, tag); -// work_to_profile(); -// } -// -// This will call the following methods in order: -// int event_handle = profiler->BeginEvent(op_name, EventType::DEFAULT, 0) -// work_to_profile(); -// profiler->EndEvent(event_handle) -class MicroProfiler : public tflite::Profiler { +class MicroProfiler { public: - explicit MicroProfiler(tflite::ErrorReporter* reporter); - ~MicroProfiler() override = default; + MicroProfiler() = default; + virtual ~MicroProfiler() = default; - // AddEvent is unused for Tf Micro. - void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata1, - int64_t event_metadata2) override{}; + // Marks the start of a new event and returns an event handle that can be used + // to mark the end of the event via EndEvent. 
The lifetime of the tag + // parameter must exceed that of the MicroProfiler. + virtual uint32_t BeginEvent(const char* tag); - // BeginEvent followed by code followed by EndEvent will profile the code - // enclosed. Multiple concurrent events are unsupported, so the return value - // is always 0. Event_metadata1 and event_metadata2 are unused. The tag - // pointer must be valid until EndEvent is called. - uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) override; + // Marks the end of an event associated with event_handle. It is the + // responsibility of the caller to ensure than EndEvent is called once and + // only once per event_handle. + // + // If EndEvent is called more than once for the same event_handle, the last + // call will be used as the end of event marker.If EndEvent is called 0 times + // for a particular event_handle, the duration of that event will be 0 ticks. + virtual void EndEvent(uint32_t event_handle); - // Event_handle is ignored since TF Micro does not support concurrent events. - void EndEvent(uint32_t event_handle) override; + // Clears all the events that have been currently profiled. + void ClearEvents() { num_events_ = 0; } + + // Returns the sum of the ticks taken across all the events. This number + // is only meaningful if all of the events are disjoint (the end time of + // event[i] <= start time of event[i+1]). + int32_t GetTotalTicks() const; + + // Prints the profiling information of each of the events. + void Log() const; private: - tflite::ErrorReporter* reporter_; - int32_t start_time_; - const char* event_tag_; - TF_LITE_REMOVE_VIRTUAL_DELETE + // Maximum number of events that this class can keep track of. If we call + // AddEvent more than kMaxEvents number of times, then the oldest event's + // profiling information will be overwritten. + static constexpr int kMaxEvents = 50; + + const char* tags_[kMaxEvents]; + int32_t start_ticks_[kMaxEvents]; + int32_t end_ticks_[kMaxEvents]; + int num_events_ = 0; + + TF_LITE_REMOVE_VIRTUAL_DELETE; }; +#if defined(NDEBUG) +// For release builds, the ScopedMicroProfiler is a noop. +// +// This is done because the ScipedProfiler is used as part of the +// MicroInterpreter and we want to ensure zero overhead for the release builds. +class ScopedMicroProfiler { + public: + explicit ScopedMicroProfiler(const char* tag, MicroProfiler* profiler) {} +}; + +#else + +// This class can be used to add events to a MicroProfiler object that span the +// lifetime of the ScopedMicroProfiler object. +// Usage example: +// +// MicroProfiler profiler(); +// ... 
+// {
+//   ScopedMicroProfiler scoped_profiler("custom_tag", profiler);
+//   work_to_profile();
+// }
+class ScopedMicroProfiler {
+ public:
+  explicit ScopedMicroProfiler(const char* tag, MicroProfiler* profiler)
+      : profiler_(profiler) {
+    if (profiler_ != nullptr) {
+      event_handle_ = profiler_->BeginEvent(tag);
+    }
+  }
+
+  ~ScopedMicroProfiler() {
+    if (profiler_ != nullptr) {
+      profiler_->EndEvent(event_handle_);
+    }
+  }
+
+ private:
+  uint32_t event_handle_ = 0;
+  MicroProfiler* profiler_ = nullptr;
+};
+#endif // !defined(NDEBUG)
+
 } // namespace tflite
 #endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_time.cc b/code/components/tfmicro/tensorflow/lite/micro/micro_time.cc
index 09119de8..d7c51f90 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/micro_time.cc
+++ b/code/components/tfmicro/tensorflow/lite/micro/micro_time.cc
@@ -27,8 +27,14 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_time.h"
+#if defined(TF_LITE_USE_CTIME)
+#include <ctime>
+#endif
+
 namespace tflite {
+#if !defined(TF_LITE_USE_CTIME)
+
 // Reference implementation of the ticks_per_second() function that's required
 // for a platform to support Tensorflow Lite for Microcontrollers profiling.
 // This returns 0 by default because timing is an optional feature that builds
@@ -41,4 +47,13 @@ int32_t ticks_per_second() { return 0; }
 // that builds without errors on platforms that do not need it.
 int32_t GetCurrentTimeTicks() { return 0; }
+#else // defined(TF_LITE_USE_CTIME)
+
+// For platforms that support ctime, we implement the micro_time interface in
+// this central location.
+int32_t ticks_per_second() { return CLOCKS_PER_SEC; }
+
+int32_t GetCurrentTimeTicks() { return clock(); }
+#endif
+
 } // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/micro_time.h b/code/components/tfmicro/tensorflow/lite/micro/micro_time.h
index 465490a8..fac9069b 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/micro_time.h
+++ b/code/components/tfmicro/tensorflow/lite/micro/micro_time.h
@@ -15,7 +15,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
 #define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
-#include <stdint.h>
+#include <cstdint>
 namespace tflite {
@@ -26,6 +26,11 @@ int32_t ticks_per_second();
 // Return time in ticks. The meaning of a tick varies per platform.
 int32_t GetCurrentTimeTicks();
+inline int32_t TicksToMs(int32_t ticks) {
+  return static_cast<int32_t>(1000.0f * static_cast<float>(ticks) /
+                              static_cast<float>(ticks_per_second()));
+}
+
 } // namespace tflite
 #endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_
diff --git a/code/components/tfmicro/tensorflow/lite/micro/system_setup.cc b/code/components/tfmicro/tensorflow/lite/micro/system_setup.cc
new file mode 100644
index 00000000..db4a1007
--- /dev/null
+++ b/code/components/tfmicro/tensorflow/lite/micro/system_setup.cc
@@ -0,0 +1,25 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/ + +#include "tensorflow/lite/micro/system_setup.h" + +namespace tflite { + +// To add an equivalent function for your own platform, create your own +// implementation file, and place it in a subfolder named after the target. See +// tensorflow/lite/micro/debug_log.cc for a similar example. +void InitializeTarget() {} + +} // namespace tflite diff --git a/code/components/tfmicro/tensorflow/lite/micro/system_setup.h b/code/components/tfmicro/tensorflow/lite/micro/system_setup.h new file mode 100644 index 00000000..71ab13a8 --- /dev/null +++ b/code/components/tfmicro/tensorflow/lite/micro/system_setup.h @@ -0,0 +1,27 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_SYSTEM_SETUP_H_ +#define TENSORFLOW_LITE_MICRO_SYSTEM_SETUP_H_ + +namespace tflite { + +// This should called during initialization of TFLM binaries and tests. It can +// be specialized if there is a need for custom target-specific intialization. +// For more information, see tensorflow/lite/micro/system_setup.cc. +void InitializeTarget(); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_SYSTEM_SETUP_H_ diff --git a/code/components/tfmicro/tensorflow/lite/micro/test_helpers.cc b/code/components/tfmicro/tensorflow/lite/micro/test_helpers.cc index 897f3110..f73073f6 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/test_helpers.cc +++ b/code/components/tfmicro/tensorflow/lite/micro/test_helpers.cc @@ -570,6 +570,74 @@ const Model* BuildComplexMockModel() { return model; } +const Model* BuildSimpleMultipleInputsModel() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {1}; + constexpr size_t tensors_size = 4; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor1"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_input_tensor2"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor3"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_output_tensor"), 0, false), + }; + constexpr size_t inputs_size = 3; + const int32_t inputs[inputs_size] = {0, 1, 2}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {3}; + constexpr size_t operator_inputs_size = 3; + const int32_t 
operator_inputs[operator_inputs_size] = {0, 1, 2}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {3}; + constexpr size_t operators_size = 1; + const Offset operators[operators_size] = { + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + } // namespace const TfLiteRegistration* SimpleStatefulOp::getRegistration() { @@ -704,12 +772,66 @@ TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) { bool MockCustom::freed_ = false; +const TfLiteRegistration* MultipleInputs::getRegistration() { + return GetMutableRegistration(); +} + +TfLiteRegistration* MultipleInputs::GetMutableRegistration() { + static TfLiteRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* MultipleInputs::Init(TfLiteContext* context, const char* buffer, + size_t length) { + // We don't support delegate in TFL micro. This is a weak check to test if + // context struct being zero-initialized. + TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); + freed_ = false; + // Do nothing. 
+ return nullptr; +} + +void MultipleInputs::Free(TfLiteContext* context, void* buffer) { + freed_ = true; +} + +TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) { + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); + const int32_t* input_data = input->data.i32; + const TfLiteTensor* input1; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1)); + const int32_t* input_data1 = input1->data.i32; + const TfLiteTensor* input2; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2)); + const int32_t* input_data2 = input2->data.i32; + + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); + int32_t* output_data = output->data.i32; + output_data[0] = + 0; // Catch output tensor sharing memory with an input tensor + output_data[0] = input_data[0] + input_data1[0] + input_data2[0]; + return kTfLiteOk; +} + +bool MultipleInputs::freed_ = false; + AllOpsResolver GetOpResolver() { AllOpsResolver op_resolver; op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration()); op_resolver.AddCustom("simple_stateful_op", SimpleStatefulOp::GetMutableRegistration()); - + op_resolver.AddCustom("multiple_inputs_op", + MultipleInputs::GetMutableRegistration()); return op_resolver; } @@ -721,6 +843,14 @@ const Model* GetSimpleMockModel() { return model; } +const Model* GetSimpleMultipleInputsModel() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleMultipleInputsModel()); + } + return model; +} + const Model* GetComplexMockModel() { static Model* model = nullptr; if (!model) { diff --git a/code/components/tfmicro/tensorflow/lite/micro/test_helpers.h b/code/components/tfmicro/tensorflow/lite/micro/test_helpers.h index 1db0d81f..4c8b7c20 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/test_helpers.h +++ b/code/components/tfmicro/tensorflow/lite/micro/test_helpers.h @@ -76,6 +76,20 @@ class MockCustom { static bool freed_; }; +// A simple operator with the purpose of testing multiple inputs. It returns +// the sum of the inputs. +class MultipleInputs { + public: + static const TfLiteRegistration* getRegistration(); + static TfLiteRegistration* GetMutableRegistration(); + static void* Init(TfLiteContext* context, const char* buffer, size_t length); + static void Free(TfLiteContext* context, void* buffer); + static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + + static bool freed_; +}; + // Returns an Op Resolver that can be used in the testing code. AllOpsResolver GetOpResolver(); @@ -90,6 +104,10 @@ const Model* GetComplexMockModel(); // Returns a simple flatbuffer model with two branches. const Model* GetSimpleModelWithBranch(); +// Returns a simple example flatbuffer TensorFlow Lite model. Contains 3 inputs, +// 1 output Tensor, and 1 operator. +const Model* GetSimpleMultipleInputsModel(); + // Returns a simple flatbuffer model with offline planned tensors // @param[in] num_tensors Number of tensors in the model. // @param[in] metadata_buffer Metadata for offline planner. 
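A minimal sketch of how the multiple-inputs test helpers introduced above could be exercised with the MicroInterpreter. It is not part of the vendored diff: it assumes the helpers live in the tflite::testing namespace as in upstream TFLM, and the wrapper function name and arena size are illustrative placeholders.

```cpp
#include <cstdint>

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/test_helpers.h"

// Hypothetical wrapper; the arena size is an illustrative assumption.
TfLiteStatus RunMultipleInputsExample() {
  const tflite::Model* model = tflite::testing::GetSimpleMultipleInputsModel();
  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

  constexpr size_t kArenaSize = 2048;
  static uint8_t tensor_arena[kArenaSize];

  // GetMicroErrorReporter() is the singleton accessor added by this update.
  tflite::MicroInterpreter interpreter(model, op_resolver, tensor_arena,
                                       kArenaSize,
                                       tflite::GetMicroErrorReporter());
  if (interpreter.AllocateTensors() != kTfLiteOk) {
    return kTfLiteError;
  }

  // The "multiple_inputs_op" mock sums the first element of its three inputs.
  // Note: input 1 is declared int8 in the mock model, but the mock op reads
  // every input as int32, so it is written as int32 here as well.
  interpreter.input(0)->data.i32[0] = 1;
  interpreter.input(1)->data.i32[0] = 2;
  interpreter.input(2)->data.i32[0] = 3;

  if (interpreter.Invoke() != kTfLiteOk) {
    return kTfLiteError;
  }

  // interpreter.output(0)->data.i32[0] should now hold 6.
  return kTfLiteOk;
}
```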
diff --git a/code/components/tfmicro/tensorflow/lite/micro/testing/micro_test.h b/code/components/tfmicro/tensorflow/lite/micro/testing/micro_test.h index d74d8f4f..229dfa6f 100644 --- a/code/components/tfmicro/tensorflow/lite/micro/testing/micro_test.h +++ b/code/components/tfmicro/tensorflow/lite/micro/testing/micro_test.h @@ -56,183 +56,185 @@ limitations under the License. #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/micro/micro_error_reporter.h" +#include "tensorflow/lite/micro/system_setup.h" namespace micro_test { extern int tests_passed; extern int tests_failed; extern bool is_test_complete; extern bool did_test_fail; -extern tflite::ErrorReporter* reporter; } // namespace micro_test -#define TF_LITE_MICRO_TESTS_BEGIN \ - namespace micro_test { \ - int tests_passed; \ - int tests_failed; \ - bool is_test_complete; \ - bool did_test_fail; \ - tflite::ErrorReporter* reporter; \ - } \ - \ - int main(int argc, char** argv) { \ - micro_test::tests_passed = 0; \ - micro_test::tests_failed = 0; \ - tflite::MicroErrorReporter error_reporter; \ - micro_test::reporter = &error_reporter; +namespace tflite { -#define TF_LITE_MICRO_TESTS_END \ - micro_test::reporter->Report( \ - "%d/%d tests passed", micro_test::tests_passed, \ - (micro_test::tests_failed + micro_test::tests_passed)); \ - if (micro_test::tests_failed == 0) { \ - micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \ - return kTfLiteOk; \ - } else { \ - micro_test::reporter->Report("~~~SOME TESTS FAILED~~~\n"); \ - return kTfLiteError; \ - } \ +// This additional helper function is used (instead of directly calling +// tflite::InitializeTarget from the TF_LITE_MICRO_TESTS_BEGIN macro) to avoid +// adding a dependency from every bazel test target to micro:system_setp (which +// is the target that implements InitializeTarget(). +// +// The underlying issue here is that the use of the macros results in +// dependencies that can be containted within the micro/testing:micro_test +// target bleeding on to all the tests. +inline void InitializeTest() { InitializeTarget(); } +} // namespace tflite + +#define TF_LITE_MICRO_TESTS_BEGIN \ + namespace micro_test { \ + int tests_passed; \ + int tests_failed; \ + bool is_test_complete; \ + bool did_test_fail; \ + } \ + \ + int main(int argc, char** argv) { \ + micro_test::tests_passed = 0; \ + micro_test::tests_failed = 0; \ + tflite::InitializeTest(); + +#define TF_LITE_MICRO_TESTS_END \ + MicroPrintf("%d/%d tests passed", micro_test::tests_passed, \ + (micro_test::tests_failed + micro_test::tests_passed)); \ + if (micro_test::tests_failed == 0) { \ + MicroPrintf("~~~ALL TESTS PASSED~~~\n"); \ + return kTfLiteOk; \ + } else { \ + MicroPrintf("~~~SOME TESTS FAILED~~~\n"); \ + return kTfLiteError; \ + } \ } // TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop. #define TF_LITE_MICRO_TEST(name) \ - micro_test::reporter->Report("Testing " #name); \ + MicroPrintf("Testing " #name); \ for (micro_test::is_test_complete = false, \ micro_test::did_test_fail = false; \ !micro_test::is_test_complete; micro_test::is_test_complete = true, \ micro_test::tests_passed += (micro_test::did_test_fail) ? 0 : 1, \ micro_test::tests_failed += (micro_test::did_test_fail) ? 
1 : 0) -#define TF_LITE_MICRO_EXPECT(x) \ - do { \ - if (!(x)) { \ - micro_test::reporter->Report(#x " failed at %s:%d", __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT(x) \ + do { \ + if (!(x)) { \ + MicroPrintf(#x " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) // TODO(b/139142772): this macro is used with types other than ints even though // the printf specifier is %d. -#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ - do { \ - auto vx = x; \ - auto vy = y; \ - if ((vx) != (vy)) { \ - micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \ - __FILE__, __LINE__, static_cast(vx), \ - static_cast(vy)); \ - micro_test::did_test_fail = true; \ - } \ +#define TF_LITE_MICRO_EXPECT_EQ(x, y) \ + do { \ + auto vx = x; \ + auto vy = y; \ + if ((vx) != (vy)) { \ + MicroPrintf(#x " == " #y " failed at %s:%d (%d vs %d)", __FILE__, \ + __LINE__, static_cast(vx), static_cast(vy)); \ + micro_test::did_test_fail = true; \ + } \ } while (false) -#define TF_LITE_MICRO_EXPECT_NE(x, y) \ - do { \ - if ((x) == (y)) { \ - micro_test::reporter->Report(#x " != " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -// TODO(wangtz): Making it more generic once needed. -#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \ - epsilon) \ +#define TF_LITE_MICRO_EXPECT_NE(x, y) \ do { \ - auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \ - ? ((arr1)[(idx1)] - (arr2)[(idx2)]) \ - : ((arr2)[(idx2)] - (arr1)[(idx1)]); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \ - static_cast(idx1), static_cast((arr1)[(idx1)]), \ - static_cast(idx2), static_cast((arr2)[(idx2)]), \ - __FILE__, __LINE__); \ + if ((x) == (y)) { \ + MicroPrintf(#x " != " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ - do { \ - auto vx = (x); \ - auto vy = (y); \ - auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \ - if (delta > epsilon) { \ - micro_test::reporter->Report( \ - #x " (%f) near " #y " (%f) failed at %s:%d", \ - static_cast(vx), static_cast(vy), __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_GT(x, y) \ - do { \ - if ((x) <= (y)) { \ - micro_test::reporter->Report(#x " > " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_LT(x, y) \ - do { \ - if ((x) >= (y)) { \ - micro_test::reporter->Report(#x " < " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ - micro_test::did_test_fail = true; \ - } \ - } while (false) - -#define TF_LITE_MICRO_EXPECT_GE(x, y) \ +// TODO(wangtz): Making it more generic once needed. +#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \ + epsilon) \ do { \ - if ((x) < (y)) { \ - micro_test::reporter->Report(#x " >= " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ + auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \ + ? 
((arr1)[(idx1)] - (arr2)[(idx2)]) \ + : ((arr2)[(idx2)] - (arr1)[(idx1)]); \ + if (delta > epsilon) { \ + MicroPrintf(#arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \ + static_cast(idx1), static_cast((arr1)[(idx1)]), \ + static_cast(idx2), static_cast((arr2)[(idx2)]), \ + __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_LE(x, y) \ +// The check vx != vy is needed to properly handle the case where both +// x and y evaluate to infinity. See #46960 for more details. +#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \ do { \ - if ((x) > (y)) { \ - micro_test::reporter->Report(#x " <= " #y " failed at %s:%d", __FILE__, \ - __LINE__); \ + auto vx = (x); \ + auto vy = (y); \ + auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \ + if (vx != vy && delta > epsilon) { \ + MicroPrintf(#x " (%f) near " #y " (%f) failed at %s:%d", \ + static_cast(vx), static_cast(vy), __FILE__, \ + __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_TRUE(x) \ +#define TF_LITE_MICRO_EXPECT_GT(x, y) \ do { \ - if (!(x)) { \ - micro_test::reporter->Report(#x " was not true failed at %s:%d", \ - __FILE__, __LINE__); \ + if ((x) <= (y)) { \ + MicroPrintf(#x " > " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_EXPECT_FALSE(x) \ +#define TF_LITE_MICRO_EXPECT_LT(x, y) \ + do { \ + if ((x) >= (y)) { \ + MicroPrintf(#x " < " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } while (false) + +#define TF_LITE_MICRO_EXPECT_GE(x, y) \ do { \ - if (x) { \ - micro_test::reporter->Report(#x " was not false failed at %s:%d", \ - __FILE__, __LINE__); \ + if ((x) < (y)) { \ + MicroPrintf(#x " >= " #y " failed at %s:%d", __FILE__, __LINE__); \ micro_test::did_test_fail = true; \ } \ } while (false) -#define TF_LITE_MICRO_FAIL(msg) \ - do { \ - micro_test::reporter->Report("FAIL: %s", msg, __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ +#define TF_LITE_MICRO_EXPECT_LE(x, y) \ + do { \ + if ((x) > (y)) { \ + MicroPrintf(#x " <= " #y " failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ } while (false) -#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \ +#define TF_LITE_MICRO_EXPECT_TRUE(x) \ do { \ - for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \ - if (string1[i] != string2[i]) { \ - micro_test::reporter->Report("FAIL: %s did not match %s", string1, \ - string2, __FILE__, __LINE__); \ - micro_test::did_test_fail = true; \ - } \ + if (!(x)) { \ + MicroPrintf(#x " was not true failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ } \ } while (false) +#define TF_LITE_MICRO_EXPECT_FALSE(x) \ + do { \ + if (x) { \ + MicroPrintf(#x " was not false failed at %s:%d", __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } while (false) + +#define TF_LITE_MICRO_FAIL(msg) \ + do { \ + MicroPrintf("FAIL: %s", msg, __FILE__, __LINE__); \ + micro_test::did_test_fail = true; \ + } while (false) + +#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \ + do { \ + for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \ + if (string1[i] != string2[i]) { \ + MicroPrintf("FAIL: %s did not match %s", string1, string2, __FILE__, \ + __LINE__); \ + micro_test::did_test_fail = true; \ + } \ + } \ + } while (false) + #endif // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_ diff --git 
a/code/components/tfmicro/tensorflow/lite/micro/testing/test_conv_model.cc b/code/components/tfmicro/tensorflow/lite/micro/testing/test_conv_model.cc deleted file mode 100644 index 358479c3..00000000 --- a/code/components/tfmicro/tensorflow/lite/micro/testing/test_conv_model.cc +++ /dev/null @@ -1,1799 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/testing/test_conv_model.h" - -extern const unsigned char kTestConvModelData[] = { - 0x24, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x1c, 0x00, 0x04, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x00, - 0x12, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xb4, 0x52, 0x00, 0x00, - 0x3c, 0x42, 0x00, 0x00, 0x24, 0x42, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, - 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x0f, 0x00, 0x00, 0x00, - 0xd4, 0x41, 0x00, 0x00, 0xc0, 0x41, 0x00, 0x00, 0x64, 0x41, 0x00, 0x00, - 0xc0, 0x40, 0x00, 0x00, 0x7c, 0x40, 0x00, 0x00, 0x58, 0x40, 0x00, 0x00, - 0x44, 0x13, 0x00, 0x00, 0xa0, 0x12, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x6c, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, - 0x44, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xd6, 0xbe, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x31, 0x2e, 0x35, 0x2e, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x94, 0xb2, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0xb2, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xb4, 0xb2, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xc4, 0xb2, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xb2, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x46, 0xbf, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x12, 0x00, 0x00, 0x7d, 0x6a, 0x24, 0xa1, 0xf6, 0xca, 0x70, 0x2f, - 0x8e, 0xb1, 0xe8, 0x15, 0x42, 0x08, 0x32, 0xf6, 0xe9, 0xfb, 0xa0, 0xda, - 0xe4, 0xf1, 0x0a, 0x9d, 0x72, 0x66, 0x88, 0x37, 0xe9, 0x9e, 0x08, 0x54, - 0x61, 0x51, 0x40, 0x93, 0x4d, 0xcf, 0xe2, 0x08, 0x36, 0xad, 0xb1, 0x8e, - 0xfc, 0xe4, 0x02, 0xd1, 0x9a, 0x1e, 0x05, 0x67, 0xa3, 0x3b, 0xa6, 0xde, - 0x5d, 0x2a, 0xcc, 0x8c, 0x3c, 0x2e, 0xd2, 0x15, 0xc2, 0x60, 0xab, 0xea, - 0x73, 0xe4, 0x88, 0xc1, 0x66, 0x21, 0xb0, 0xe5, 0x5b, 0x55, 0xda, 0x69, - 0x2d, 0x0c, 0x66, 0x07, 0x74, 0x36, 0xcd, 0x79, 0x81, 0xf9, 0x5c, 0x2c, - 
0xb5, 0x93, 0xab, 0x76, 0xa1, 0x1f, 0x20, 0x90, 0x89, 0xe1, 0x41, 0xc7, - 0x32, 0xc2, 0xa3, 0x03, 0x77, 0x86, 0x79, 0xf7, 0x89, 0xc1, 0xb1, 0x42, - 0x2a, 0x75, 0xc7, 0xc1, 0x2f, 0xbb, 0xf6, 0xe8, 0x23, 0x99, 0x9b, 0x74, - 0x9c, 0xe5, 0x91, 0x15, 0xc6, 0x08, 0x0e, 0xae, 0x7c, 0xd3, 0x27, 0x54, - 0xfb, 0xa7, 0x49, 0x65, 0x52, 0x2f, 0x63, 0x33, 0x8b, 0x5f, 0x67, 0x21, - 0x25, 0xe0, 0xcf, 0x95, 0x03, 0x05, 0x19, 0x0c, 0x3d, 0xfc, 0x95, 0x42, - 0xa9, 0x26, 0x27, 0x54, 0xa3, 0x71, 0xb4, 0x70, 0x7a, 0x40, 0x0d, 0xc1, - 0x72, 0x04, 0x81, 0x3b, 0xb9, 0xb7, 0xd2, 0xc1, 0x4e, 0xf8, 0xff, 0xca, - 0x66, 0xc1, 0xbe, 0xb9, 0x09, 0xbd, 0xb9, 0x2c, 0x5b, 0x97, 0xc3, 0xa8, - 0xf6, 0xc4, 0x23, 0x93, 0x2e, 0xf6, 0xce, 0x2e, 0xdb, 0xfb, 0x8f, 0xb0, - 0xc8, 0xba, 0xfa, 0x97, 0xfd, 0xc0, 0x0a, 0xc8, 0x2c, 0xf3, 0x4c, 0x4d, - 0x8b, 0x3b, 0x47, 0x11, 0xfb, 0xe8, 0x96, 0xe3, 0xcc, 0xef, 0xe4, 0xb5, - 0x07, 0xa1, 0xb7, 0xa9, 0xf7, 0x98, 0x71, 0x59, 0x9b, 0x5a, 0x7b, 0x88, - 0xe4, 0xcf, 0x9b, 0x55, 0x26, 0xce, 0x59, 0x73, 0x66, 0x17, 0x9c, 0x74, - 0x02, 0xfc, 0x24, 0x01, 0xde, 0x44, 0x98, 0xe3, 0x8b, 0x18, 0x02, 0x42, - 0xf5, 0x0f, 0xbc, 0xcb, 0xf7, 0x37, 0xb1, 0xd5, 0xb4, 0x7c, 0x0a, 0x6a, - 0x59, 0x59, 0xc9, 0x11, 0xd8, 0x0f, 0xf9, 0xab, 0x40, 0xdd, 0x14, 0xf9, - 0x30, 0xaa, 0xf1, 0x8c, 0x6d, 0xbc, 0x4c, 0x5b, 0x71, 0x95, 0xfd, 0x41, - 0x4c, 0xf3, 0xb4, 0x7f, 0x1c, 0xb6, 0x4b, 0x12, 0x3b, 0x6e, 0xc1, 0xce, - 0x6f, 0xf8, 0x57, 0xb7, 0x5e, 0x2a, 0x36, 0x32, 0x3d, 0x85, 0xc6, 0xbf, - 0xd7, 0xab, 0x95, 0x45, 0x62, 0xae, 0xb8, 0xa6, 0x03, 0xcc, 0x21, 0x25, - 0x18, 0x5a, 0xa8, 0x03, 0x27, 0x33, 0x47, 0xb1, 0x7e, 0x0e, 0xbd, 0xc3, - 0x24, 0x25, 0x78, 0x28, 0xa4, 0xe3, 0x5b, 0x08, 0xbf, 0x04, 0xa2, 0xae, - 0x90, 0x4c, 0x96, 0x78, 0xa8, 0xb1, 0xb8, 0x54, 0x89, 0x25, 0x2d, 0x35, - 0x93, 0x95, 0xa5, 0xd3, 0x1a, 0xe6, 0x00, 0x8b, 0xfe, 0x36, 0x0f, 0xd2, - 0x6e, 0xff, 0x86, 0x93, 0x48, 0xb8, 0x08, 0x39, 0x1f, 0x3a, 0x2d, 0xe7, - 0x47, 0x5e, 0x05, 0x66, 0x7a, 0xb8, 0xe4, 0xda, 0xbc, 0x5b, 0x57, 0xdf, - 0xd9, 0x0a, 0xb9, 0x48, 0x5d, 0x0c, 0x57, 0xed, 0x8d, 0xbb, 0x8d, 0x4b, - 0x0e, 0xb8, 0xea, 0x02, 0x06, 0x2f, 0xfd, 0x28, 0x0d, 0x0b, 0xf4, 0xf4, - 0x52, 0x81, 0x77, 0x15, 0x87, 0x53, 0x28, 0xef, 0xbe, 0xc6, 0x4c, 0x45, - 0x3e, 0x1a, 0x6e, 0xbd, 0x10, 0xd8, 0x9a, 0x72, 0x1f, 0x14, 0xe2, 0x37, - 0x08, 0xaf, 0xfa, 0xce, 0xd3, 0x84, 0x23, 0x43, 0x8c, 0x5c, 0xce, 0x1b, - 0xf7, 0xf3, 0xb0, 0x3b, 0xfd, 0x33, 0xf8, 0x09, 0xf1, 0x41, 0xa5, 0xa8, - 0x86, 0x8d, 0x56, 0xde, 0xf6, 0x68, 0xe3, 0x4c, 0x97, 0xa6, 0xc3, 0x66, - 0x9b, 0xa9, 0x8a, 0xbd, 0x59, 0x45, 0xfb, 0xdf, 0xa1, 0x42, 0x10, 0x1c, - 0x55, 0x22, 0x53, 0xe1, 0x32, 0x33, 0xf9, 0xfa, 0xc2, 0x70, 0x0f, 0x49, - 0x15, 0xa7, 0x21, 0xbc, 0x56, 0x35, 0x09, 0x06, 0xe6, 0x5e, 0xc4, 0xc1, - 0x64, 0x93, 0x59, 0x3b, 0x8e, 0xb7, 0x52, 0x6c, 0x4d, 0xa1, 0xb7, 0xee, - 0x14, 0xc2, 0x01, 0x25, 0xbb, 0x5e, 0xe0, 0xc6, 0xa4, 0x4f, 0xb5, 0x20, - 0x88, 0xe0, 0xd7, 0x5e, 0x26, 0x5b, 0x9f, 0xf7, 0xb5, 0x26, 0x5b, 0xfc, - 0xf3, 0x3e, 0xf3, 0x57, 0x6f, 0x9e, 0x9e, 0x51, 0x07, 0x6e, 0xc0, 0x53, - 0x17, 0x89, 0x79, 0xf0, 0x91, 0xb2, 0x54, 0x30, 0x1f, 0x97, 0x95, 0xfc, - 0x02, 0x2d, 0x0c, 0x06, 0xb0, 0x82, 0xad, 0x20, 0xc2, 0xdc, 0x78, 0xbc, - 0xbe, 0x5b, 0x88, 0xa0, 0xdd, 0x45, 0x49, 0x26, 0xec, 0xb4, 0xa5, 0x8b, - 0x7f, 0xdd, 0x40, 0xcf, 0x9e, 0xbe, 0x46, 0x4d, 0x36, 0xab, 0x0a, 0x34, - 0x1a, 0x2a, 0xd0, 0xd3, 0x83, 0x96, 0xff, 0x88, 0xa4, 0xd8, 0x48, 0x75, - 0x2f, 0xcb, 0x3c, 0xc3, 0xbb, 0xc7, 0x2f, 0xe9, 0xf9, 0xa3, 0xde, 0x9d, - 0xbb, 0x5e, 0x37, 0x29, 0xf6, 0x75, 0xcc, 0x85, 0xeb, 0xf9, 0x73, 0xf7, - 
0xdc, 0x31, 0x8c, 0x56, 0x52, 0x4a, 0x44, 0xa4, 0x2a, 0x2a, 0x51, 0x49, - 0x77, 0x6d, 0x35, 0x0a, 0xf9, 0x44, 0xaa, 0x36, 0x05, 0xef, 0x1e, 0x6b, - 0xe5, 0x65, 0x6b, 0xaa, 0xc1, 0x41, 0x9c, 0x62, 0xd0, 0x70, 0x78, 0xff, - 0x88, 0xe8, 0x5f, 0x3c, 0x2e, 0x00, 0x6c, 0xe3, 0xdb, 0xc3, 0x54, 0x66, - 0xa9, 0xf4, 0xe2, 0x4c, 0x91, 0x11, 0xc8, 0x3c, 0x39, 0x9b, 0x31, 0x81, - 0xc7, 0x11, 0x22, 0x62, 0xb7, 0x26, 0xa0, 0x0c, 0x2e, 0x6c, 0xe7, 0x34, - 0x3b, 0x1f, 0x27, 0xb3, 0xe5, 0x4f, 0xc9, 0x71, 0xb2, 0x18, 0x99, 0x59, - 0x95, 0xc6, 0x35, 0x4c, 0x5d, 0xa3, 0x59, 0xd1, 0x8b, 0x71, 0xea, 0xe7, - 0x30, 0x3f, 0xe7, 0x8c, 0x1a, 0x59, 0xeb, 0xc5, 0x5d, 0xbd, 0xe6, 0x00, - 0x67, 0x02, 0xfb, 0xca, 0x8d, 0xdf, 0x71, 0xb6, 0xed, 0xc7, 0xd2, 0xf2, - 0x72, 0x1b, 0xd3, 0x63, 0x51, 0x1f, 0x04, 0xe9, 0xf9, 0xe2, 0x38, 0x13, - 0x48, 0x63, 0x19, 0x66, 0x2b, 0x48, 0xc8, 0x1b, 0x9d, 0x19, 0x5a, 0x57, - 0x44, 0x2d, 0x30, 0xb5, 0xce, 0x3b, 0xcc, 0xae, 0xc4, 0x5e, 0x4e, 0x96, - 0x62, 0x5c, 0x53, 0x1f, 0xbf, 0xbd, 0xc8, 0x9d, 0xcf, 0x81, 0xb3, 0x1e, - 0xb0, 0x22, 0xd5, 0xbe, 0x60, 0x65, 0xd9, 0xeb, 0x11, 0x74, 0x8c, 0x24, - 0x18, 0x67, 0x45, 0xd3, 0xf8, 0x3f, 0xc5, 0xdf, 0xac, 0x65, 0xd4, 0x0c, - 0x82, 0x63, 0xd6, 0x43, 0x94, 0xa0, 0x3b, 0xff, 0x03, 0x0f, 0xbb, 0xe4, - 0x4d, 0x3b, 0x41, 0x9f, 0xf4, 0x1a, 0xa9, 0xdb, 0x15, 0x5b, 0x9a, 0x92, - 0xcb, 0xd5, 0xb8, 0x33, 0x5e, 0xea, 0x28, 0x3d, 0x2d, 0x30, 0x20, 0xcd, - 0xb6, 0x23, 0x18, 0x0e, 0x10, 0x2a, 0xa9, 0xe1, 0xad, 0xbc, 0x96, 0xd1, - 0xf9, 0xf3, 0x95, 0x4f, 0x2a, 0x0b, 0x91, 0xff, 0xf0, 0x96, 0x14, 0x00, - 0xaa, 0xfb, 0x1a, 0x44, 0x21, 0x9b, 0xe8, 0x71, 0x31, 0x9e, 0xd6, 0x58, - 0x7f, 0x02, 0x36, 0x5e, 0x92, 0x8d, 0x93, 0x99, 0xac, 0xb6, 0x87, 0x39, - 0xda, 0x47, 0xef, 0x70, 0xd4, 0xf7, 0x8d, 0x2a, 0xbd, 0x08, 0x40, 0x4d, - 0xec, 0xeb, 0x4e, 0x1b, 0x85, 0x5d, 0x55, 0x64, 0x4c, 0xf3, 0x5e, 0x8f, - 0x68, 0x1e, 0x5e, 0x64, 0xc3, 0xb8, 0x92, 0x24, 0x41, 0x98, 0x78, 0x09, - 0x85, 0x87, 0x17, 0x2c, 0x88, 0x9e, 0x62, 0x86, 0x4f, 0x44, 0x71, 0x9c, - 0xa8, 0x73, 0xb3, 0x14, 0x1f, 0x3c, 0x96, 0x6b, 0xab, 0xad, 0x43, 0xdf, - 0x67, 0x34, 0x66, 0x30, 0x1d, 0x15, 0xd3, 0xe7, 0xd5, 0x8b, 0x00, 0xaa, - 0x11, 0x77, 0xea, 0x36, 0xc9, 0x49, 0x99, 0x93, 0x01, 0x6e, 0x00, 0x4a, - 0x93, 0x08, 0x2c, 0x44, 0x01, 0x91, 0xe0, 0x91, 0xdd, 0xab, 0x70, 0x4b, - 0xe7, 0xbf, 0x2d, 0x0f, 0xd4, 0x52, 0xa0, 0xf1, 0x5d, 0xa0, 0xcc, 0xb9, - 0x1b, 0xa2, 0x62, 0xeb, 0x23, 0x1e, 0x8e, 0xbb, 0x2b, 0xb6, 0xc5, 0x3a, - 0xdf, 0x32, 0x99, 0xde, 0x2e, 0x94, 0xcf, 0x98, 0x99, 0x34, 0x59, 0x60, - 0xcf, 0x57, 0xe0, 0xb0, 0xd9, 0x89, 0xaa, 0xc2, 0x4f, 0x1e, 0x38, 0x88, - 0xca, 0x32, 0x93, 0x9b, 0xa3, 0x2b, 0x17, 0x0b, 0x40, 0x5e, 0x69, 0xbd, - 0x14, 0x15, 0xca, 0x1a, 0x21, 0xdf, 0xa8, 0x4e, 0x14, 0x5e, 0x18, 0x40, - 0xe3, 0x4e, 0x04, 0x1f, 0xe5, 0x81, 0x53, 0x11, 0xae, 0x5e, 0x30, 0xe5, - 0xda, 0xd7, 0xf1, 0x3b, 0x72, 0x1b, 0xa5, 0xe3, 0x13, 0xad, 0x40, 0x54, - 0xae, 0xf0, 0xbc, 0x2b, 0xc1, 0x1a, 0x9c, 0xdd, 0xe1, 0xd0, 0x12, 0x10, - 0xfd, 0x59, 0xce, 0x36, 0x60, 0x86, 0xa0, 0xa7, 0xee, 0xe1, 0x02, 0xe6, - 0xf8, 0xf0, 0x5c, 0x4f, 0xa3, 0xa4, 0xe4, 0x09, 0xb9, 0xc3, 0x84, 0xe3, - 0x8d, 0x97, 0x21, 0x62, 0xf3, 0x11, 0x47, 0xb1, 0x4a, 0xce, 0x5b, 0x89, - 0xde, 0x86, 0xb5, 0x0e, 0xba, 0xbc, 0x8c, 0xcf, 0x54, 0x38, 0x3a, 0xc6, - 0xaf, 0x8c, 0x4d, 0x9d, 0xff, 0x58, 0x9b, 0xe8, 0x32, 0xb7, 0xa2, 0x29, - 0xad, 0x91, 0x3a, 0xa5, 0xc7, 0x54, 0xff, 0xd8, 0x47, 0x4f, 0x8f, 0x38, - 0x91, 0x12, 0x76, 0xa3, 0x2e, 0xf7, 0xdd, 0xba, 0xa7, 0xd4, 0x49, 0xe5, - 0xd1, 0x74, 0xe9, 0x2a, 0x29, 0xe4, 0x64, 0xb9, 0x58, 0x98, 0x0c, 0xe5, - 
0x1f, 0xb2, 0x0e, 0x33, 0xea, 0xf8, 0x2e, 0xb1, 0x22, 0x46, 0xc2, 0x67, - 0x2d, 0xfe, 0x2e, 0xd3, 0xcf, 0xbc, 0x64, 0x7b, 0x75, 0x24, 0x53, 0x1c, - 0x42, 0x8c, 0x0b, 0x99, 0x9e, 0xa7, 0xa6, 0xb9, 0xfb, 0x5d, 0x86, 0x9f, - 0xe9, 0x04, 0x62, 0xb2, 0x42, 0x81, 0xa2, 0x0d, 0x60, 0x83, 0x40, 0xbb, - 0x21, 0x10, 0xdf, 0xaa, 0xe6, 0x6c, 0x72, 0xc5, 0xb1, 0xad, 0x9f, 0xd2, - 0x91, 0xf8, 0xb6, 0x56, 0xfb, 0x2e, 0xb3, 0xc4, 0x12, 0xd9, 0x86, 0x29, - 0x6c, 0x55, 0x88, 0x72, 0xba, 0xfb, 0x9b, 0xb9, 0x6f, 0x2d, 0x7d, 0x75, - 0xd0, 0x9d, 0xaf, 0x44, 0xb6, 0xbd, 0x7b, 0xec, 0x78, 0xf1, 0xbf, 0x66, - 0xe8, 0x79, 0x66, 0x16, 0x5e, 0xf9, 0x68, 0x89, 0x5b, 0xde, 0x8f, 0xf9, - 0xeb, 0x04, 0x0b, 0x6a, 0x71, 0xa1, 0x3b, 0x46, 0x03, 0xb4, 0x29, 0xa9, - 0x31, 0xf4, 0xc5, 0xd3, 0x43, 0x6d, 0x88, 0x43, 0xa8, 0xef, 0xb7, 0xd7, - 0x75, 0x6b, 0x83, 0x35, 0xb6, 0x2f, 0xe0, 0x5f, 0xf2, 0x14, 0xcd, 0xd0, - 0x06, 0xb3, 0x5e, 0x8b, 0xdb, 0x86, 0x11, 0x94, 0x2f, 0xfb, 0x92, 0x19, - 0x52, 0x7f, 0xcb, 0xe5, 0x22, 0x27, 0x5f, 0xe4, 0x68, 0xb2, 0xcb, 0xc7, - 0xb8, 0xec, 0xfd, 0x9e, 0x39, 0x9c, 0x5b, 0xe4, 0xae, 0xca, 0x83, 0x19, - 0xcf, 0xf0, 0x01, 0xe3, 0xfc, 0xb0, 0x28, 0xda, 0x79, 0x84, 0xfb, 0xfe, - 0xa5, 0xb6, 0xb3, 0xd2, 0x73, 0xd3, 0x11, 0xe5, 0xdf, 0x7a, 0xd7, 0x82, - 0x78, 0x25, 0x06, 0x5b, 0x0f, 0x89, 0x9d, 0x0b, 0x9b, 0xd1, 0x1b, 0xc5, - 0xb7, 0x67, 0xef, 0x7c, 0xa2, 0xa3, 0xca, 0x27, 0xd0, 0x59, 0xb9, 0x99, - 0x86, 0xa9, 0xf6, 0x9a, 0x28, 0xf0, 0xbb, 0x42, 0xd2, 0xa0, 0xa8, 0x01, - 0x29, 0xa1, 0x0c, 0x1b, 0x33, 0x1b, 0x9c, 0xcb, 0xe4, 0x6c, 0x61, 0x0a, - 0xc4, 0xd7, 0x6c, 0xec, 0x86, 0xb3, 0xd2, 0xaa, 0x8c, 0xab, 0x1a, 0xf4, - 0x03, 0x2e, 0x2b, 0x42, 0xbe, 0xc1, 0x31, 0x1d, 0x57, 0x47, 0xdc, 0x7b, - 0xb5, 0x8f, 0x8b, 0xdf, 0x06, 0xad, 0x3f, 0xf4, 0x4f, 0xb5, 0x52, 0x07, - 0x4e, 0x25, 0xb3, 0x73, 0x34, 0x92, 0x6a, 0x89, 0x93, 0x28, 0x8b, 0x96, - 0x9d, 0xdb, 0xb4, 0x77, 0x81, 0x76, 0x86, 0xd2, 0xa5, 0x94, 0x76, 0x35, - 0xc9, 0x66, 0x4e, 0xd8, 0xc5, 0xc3, 0xc9, 0x34, 0xaf, 0xad, 0x4a, 0x7c, - 0x92, 0x24, 0xb1, 0x7d, 0x7d, 0xac, 0xf6, 0xcb, 0x8f, 0x36, 0xc1, 0xb2, - 0x63, 0x78, 0x99, 0x33, 0x23, 0x68, 0x6e, 0x71, 0x6a, 0xcc, 0x05, 0xf9, - 0x41, 0x92, 0x30, 0xf0, 0xb1, 0xb4, 0xa6, 0x46, 0x86, 0x62, 0xd9, 0xd9, - 0x94, 0x8a, 0xb2, 0x9c, 0x68, 0xff, 0xf4, 0x3a, 0x2e, 0xaf, 0xee, 0xcf, - 0x04, 0x94, 0x53, 0x35, 0x25, 0xf9, 0xaa, 0x74, 0x93, 0xf3, 0x63, 0xc0, - 0xd2, 0x22, 0x30, 0x8c, 0xde, 0xa6, 0xb1, 0xb4, 0xa1, 0x56, 0x07, 0x06, - 0x71, 0xa2, 0x9e, 0x42, 0x31, 0xa3, 0x1e, 0xa6, 0x9a, 0xbc, 0x9f, 0x5b, - 0x12, 0x3c, 0xc2, 0x74, 0xf9, 0x61, 0x71, 0xef, 0x73, 0x86, 0xc2, 0x3b, - 0x25, 0x8a, 0x31, 0x72, 0x27, 0xac, 0xa4, 0x72, 0xf3, 0xbb, 0x78, 0x2c, - 0x94, 0xed, 0xa8, 0x3a, 0x42, 0x98, 0x34, 0xda, 0x3e, 0x60, 0x1c, 0x4a, - 0xec, 0x6b, 0x4e, 0x5f, 0x2a, 0x62, 0xb9, 0xad, 0xc9, 0xd9, 0x38, 0x90, - 0xa7, 0x3b, 0xd3, 0x1a, 0xbb, 0x81, 0x0d, 0x33, 0xd9, 0x16, 0x35, 0x8e, - 0xc3, 0x88, 0x36, 0xfa, 0x3e, 0xa8, 0x4f, 0x30, 0x9d, 0xf1, 0x08, 0xea, - 0x40, 0x1b, 0x87, 0x4d, 0x23, 0x8e, 0x8e, 0xb0, 0xe2, 0xf0, 0x27, 0xc1, - 0xdc, 0x0d, 0xe2, 0x8f, 0x93, 0xef, 0x8b, 0xd1, 0x19, 0xa5, 0xbe, 0xd7, - 0x5a, 0x8a, 0x38, 0x62, 0x43, 0xba, 0x74, 0xf8, 0xae, 0x11, 0x1f, 0x1d, - 0xa4, 0x6e, 0x70, 0x94, 0x91, 0x14, 0xf4, 0xff, 0xbe, 0x39, 0xb4, 0x33, - 0xc2, 0x87, 0x74, 0x1b, 0xfd, 0x9a, 0xa8, 0x64, 0x09, 0x4b, 0x7f, 0x95, - 0x0a, 0xcb, 0x6b, 0x15, 0x54, 0x1d, 0xc6, 0x03, 0x1d, 0x1b, 0x25, 0x56, - 0x15, 0xb5, 0xd7, 0xe5, 0xd6, 0xf3, 0x28, 0xa4, 0xde, 0x1b, 0x39, 0x0d, - 0x59, 0x26, 0x12, 0xe4, 0x32, 0xf2, 0x25, 0xeb, 0xc0, 0xdb, 0x58, 0xe5, - 
0xce, 0x64, 0x6f, 0x70, 0x74, 0xc1, 0xc9, 0xbd, 0x75, 0xef, 0x16, 0x02, - 0xdf, 0x27, 0x09, 0xc8, 0xb8, 0x37, 0x8f, 0x44, 0x0d, 0x58, 0x48, 0xf5, - 0xc2, 0x53, 0x21, 0x28, 0x16, 0xa4, 0x56, 0x02, 0xdf, 0xa7, 0x97, 0xa4, - 0x5c, 0x48, 0x75, 0x51, 0x89, 0x0b, 0xa7, 0x4d, 0xd9, 0x9e, 0x04, 0x4e, - 0x5d, 0x6c, 0xe5, 0x1f, 0x68, 0x88, 0xcc, 0xb7, 0x9a, 0x20, 0x05, 0x83, - 0x82, 0x6c, 0xfd, 0xdb, 0x07, 0x6c, 0xec, 0x61, 0xaa, 0x36, 0x57, 0x68, - 0x01, 0xf2, 0x70, 0xfe, 0xe6, 0x4d, 0xe1, 0xa9, 0xb6, 0xb6, 0x52, 0xe6, - 0x20, 0x52, 0x0f, 0x27, 0x9a, 0x1c, 0x2d, 0x20, 0x9b, 0xd4, 0x07, 0xd3, - 0xf6, 0x85, 0x4b, 0xf2, 0x52, 0x4d, 0x4c, 0xd7, 0xf0, 0x32, 0x5d, 0x2e, - 0xef, 0xa2, 0xd0, 0xcd, 0x48, 0x89, 0xbc, 0x9f, 0xcb, 0x37, 0x02, 0x29, - 0xa5, 0xdb, 0xab, 0xfa, 0x1d, 0xf4, 0x53, 0x78, 0x30, 0xde, 0x2c, 0x5c, - 0x35, 0x7f, 0x3d, 0xe1, 0xe0, 0xce, 0xdb, 0x13, 0xca, 0x2a, 0xae, 0xdf, - 0x1c, 0xb1, 0xb6, 0xb9, 0x6a, 0x9f, 0x28, 0xb0, 0x54, 0x5a, 0x00, 0xdd, - 0x76, 0x14, 0xfb, 0x17, 0xc2, 0x2a, 0x45, 0xa2, 0x18, 0xbb, 0x8a, 0x3e, - 0xbe, 0x0e, 0xa5, 0x1b, 0x3c, 0x70, 0x56, 0x10, 0x98, 0xec, 0xc6, 0x3a, - 0x95, 0x2a, 0x96, 0x6a, 0x44, 0xef, 0xd9, 0x9c, 0x2a, 0x45, 0xb4, 0x15, - 0xf8, 0x2e, 0x03, 0x5d, 0x8c, 0x79, 0xfb, 0xb0, 0x53, 0x71, 0xcd, 0x0d, - 0xf4, 0xe2, 0xfc, 0x3b, 0x71, 0xee, 0x30, 0xf2, 0x29, 0xd3, 0xaa, 0x18, - 0x7a, 0x45, 0x1d, 0x99, 0x6d, 0x2f, 0x1f, 0x2d, 0x32, 0x23, 0x48, 0xc2, - 0x69, 0x33, 0x3d, 0x04, 0xa7, 0xa3, 0x96, 0xb5, 0x76, 0x5b, 0x4e, 0xb7, - 0x3c, 0x10, 0x58, 0x17, 0xf4, 0x5f, 0xec, 0x51, 0x6d, 0x5a, 0x3b, 0x7f, - 0x1e, 0x0e, 0xbb, 0xbf, 0x77, 0x43, 0xf7, 0xa4, 0x57, 0xc0, 0x33, 0xac, - 0xc1, 0xe3, 0x3e, 0x1f, 0x65, 0x3c, 0x62, 0x19, 0x46, 0x2d, 0x7b, 0x2d, - 0x07, 0x44, 0x48, 0xf4, 0x91, 0xdf, 0x59, 0x32, 0x10, 0xf7, 0x12, 0xe2, - 0xe5, 0x39, 0x70, 0x37, 0xa4, 0x79, 0x9a, 0x17, 0x19, 0xe8, 0x90, 0xe7, - 0x37, 0x0d, 0xb6, 0x6d, 0x58, 0xe6, 0x7e, 0x57, 0x76, 0x8a, 0xe8, 0xd0, - 0x76, 0x30, 0x25, 0xda, 0xb6, 0xdf, 0x59, 0x3c, 0x6c, 0x20, 0x65, 0x88, - 0xd2, 0x60, 0x5e, 0x39, 0xb6, 0x6b, 0xac, 0xa2, 0x25, 0xc6, 0xa7, 0xb1, - 0x2f, 0xbb, 0x1d, 0x23, 0xee, 0x02, 0x08, 0x1d, 0xd6, 0x6c, 0x0e, 0xbc, - 0xea, 0xd2, 0xc2, 0x70, 0x34, 0xe9, 0x96, 0xd3, 0xf3, 0xf4, 0x8e, 0x94, - 0x6f, 0x86, 0x76, 0xe7, 0x38, 0x08, 0x6f, 0x47, 0xf5, 0xcd, 0xab, 0xad, - 0x7a, 0x39, 0x10, 0x9a, 0xa8, 0x44, 0xba, 0x2d, 0x7f, 0x05, 0x1e, 0xb7, - 0x44, 0xd8, 0x10, 0x05, 0xd1, 0x8d, 0x98, 0x09, 0x14, 0xbb, 0x6b, 0x2b, - 0xf7, 0xeb, 0x9f, 0xa5, 0x65, 0x4b, 0x21, 0xff, 0xaf, 0xe8, 0x2e, 0x34, - 0x52, 0x38, 0xcf, 0xd5, 0x51, 0x29, 0x2c, 0x91, 0x43, 0x3a, 0x49, 0x42, - 0xdd, 0xfb, 0x0e, 0xd2, 0x77, 0x8f, 0x65, 0x93, 0x3e, 0x52, 0x22, 0x58, - 0xd6, 0xf9, 0xd9, 0x58, 0xd4, 0x06, 0xa9, 0x0c, 0x79, 0x9f, 0x1b, 0xa5, - 0x45, 0x61, 0xd8, 0x4e, 0xbf, 0x4b, 0x51, 0xe2, 0xfb, 0x6f, 0x58, 0xee, - 0xc5, 0xa5, 0x11, 0xbd, 0x99, 0x25, 0x14, 0xac, 0x94, 0x0e, 0xd1, 0xf7, - 0x54, 0xb6, 0x05, 0x8c, 0xc3, 0x57, 0xa5, 0x3c, 0x3c, 0xa6, 0x83, 0x47, - 0x38, 0xd1, 0x6a, 0xab, 0x12, 0xc0, 0xd3, 0x7f, 0x96, 0x55, 0xd7, 0xf4, - 0x3a, 0xd0, 0x08, 0x85, 0x5f, 0x3d, 0x65, 0x8e, 0xbb, 0xea, 0x34, 0xf3, - 0x53, 0x96, 0x71, 0x08, 0x9b, 0x50, 0xe9, 0x4b, 0xce, 0x8a, 0x2f, 0xef, - 0xe4, 0xb2, 0x72, 0x68, 0xcb, 0x88, 0xa8, 0xd9, 0xd9, 0xa2, 0xfc, 0x62, - 0xe8, 0x8b, 0x23, 0x2b, 0xbc, 0xf0, 0x9e, 0xb4, 0xd0, 0x40, 0x8b, 0x45, - 0xff, 0x6d, 0x37, 0x01, 0xa6, 0x4b, 0x62, 0xe0, 0x3b, 0x4e, 0x18, 0x67, - 0xb3, 0x97, 0x04, 0xa0, 0x2a, 0xf2, 0x11, 0x79, 0x38, 0xb4, 0xb2, 0xed, - 0x64, 0xc1, 0x1e, 0xfe, 0xc4, 0xf4, 0xe2, 0x4d, 0x94, 0xb4, 0x17, 0x52, - 
0x1a, 0x63, 0xe6, 0x56, 0x8a, 0x41, 0x0a, 0x5b, 0xa2, 0x1c, 0x59, 0xef, - 0x17, 0x64, 0xf9, 0xf7, 0x2c, 0xa4, 0xfd, 0x66, 0xf7, 0xe3, 0xae, 0xa0, - 0x54, 0x36, 0x64, 0x26, 0x84, 0x51, 0x49, 0xd5, 0x3a, 0x5e, 0x2c, 0xc5, - 0xca, 0xde, 0x8e, 0xe7, 0x25, 0x59, 0xb3, 0x9a, 0xb2, 0xf0, 0xff, 0xf1, - 0x83, 0xe5, 0x70, 0xc3, 0xef, 0x63, 0x66, 0x31, 0x04, 0x4d, 0x42, 0xf1, - 0xd9, 0x4c, 0x5e, 0x29, 0x92, 0x37, 0x8d, 0xd1, 0x18, 0x2a, 0x9e, 0x3c, - 0xcc, 0x05, 0xb9, 0xc4, 0xb6, 0xe7, 0x2a, 0x09, 0x3a, 0x68, 0xb5, 0x61, - 0x60, 0x36, 0x11, 0x02, 0x92, 0xf8, 0xa0, 0x56, 0x9b, 0xe8, 0xfe, 0xac, - 0x87, 0xcc, 0xaf, 0xb9, 0x62, 0xa7, 0x1e, 0x99, 0xb8, 0x9f, 0x47, 0xf7, - 0xa5, 0x12, 0x47, 0x66, 0xeb, 0xd6, 0x3a, 0x6f, 0xb3, 0x26, 0x63, 0xe2, - 0xec, 0x0c, 0xba, 0x7d, 0xc2, 0x9b, 0xb2, 0x10, 0x62, 0x03, 0x3f, 0x20, - 0xed, 0x7a, 0xce, 0x47, 0xd0, 0x50, 0x5b, 0x5c, 0x66, 0xbf, 0x01, 0x09, - 0x84, 0x0b, 0x71, 0xa8, 0x1f, 0x8d, 0xe1, 0x05, 0x09, 0xb4, 0xd5, 0x34, - 0xf1, 0xba, 0x31, 0xc6, 0x76, 0x8e, 0x00, 0x96, 0x3d, 0x6b, 0xe4, 0x66, - 0x3a, 0x22, 0xcd, 0x7f, 0x9d, 0xf8, 0x64, 0xfc, 0x76, 0x42, 0x88, 0x0e, - 0x32, 0xa5, 0xd0, 0x69, 0x56, 0xe2, 0xa5, 0x6f, 0xbb, 0xfa, 0xd8, 0xde, - 0xb4, 0x23, 0xa9, 0xc7, 0x9a, 0xc1, 0x99, 0xa7, 0x7f, 0x79, 0x58, 0xe1, - 0xe7, 0xc5, 0x56, 0x36, 0xc0, 0xfb, 0x8d, 0x8f, 0xe4, 0x6c, 0x96, 0x89, - 0xcb, 0xb0, 0xb0, 0x6e, 0xee, 0x20, 0x46, 0xd3, 0x43, 0x83, 0xac, 0x39, - 0x7c, 0x25, 0xba, 0x69, 0x3a, 0x58, 0x8a, 0x48, 0x0a, 0xf7, 0xb7, 0xfc, - 0x58, 0x7b, 0x93, 0x8b, 0xcd, 0x81, 0x7e, 0x94, 0xe0, 0xdf, 0xb1, 0xca, - 0xf6, 0x60, 0x54, 0xa9, 0x6e, 0xc6, 0x7f, 0xac, 0xfb, 0x62, 0xfe, 0xd9, - 0xd5, 0xf4, 0x6c, 0x62, 0x65, 0xf6, 0x0b, 0x24, 0x49, 0x1d, 0x55, 0xd6, - 0x4c, 0x0b, 0x5a, 0xf1, 0x2e, 0x78, 0x7a, 0x4e, 0xc1, 0xd0, 0xdb, 0xfe, - 0xd2, 0x84, 0x60, 0x68, 0x51, 0x8e, 0x3f, 0xf1, 0xa8, 0x90, 0xbf, 0xda, - 0x86, 0xda, 0x41, 0xd8, 0x90, 0x7b, 0xc3, 0xc8, 0x9e, 0xa5, 0x77, 0x06, - 0x56, 0x02, 0x13, 0x59, 0xaa, 0x89, 0xf9, 0xd5, 0x3c, 0x1d, 0xe2, 0xa9, - 0xb1, 0xc8, 0x02, 0x5a, 0x1c, 0xae, 0x72, 0x66, 0xdf, 0xb4, 0x1a, 0xb7, - 0xd2, 0x4d, 0xda, 0x4f, 0xc9, 0xed, 0x88, 0x7d, 0x9b, 0xc4, 0x4a, 0x8c, - 0x5e, 0x77, 0xaf, 0xd6, 0xd3, 0xbb, 0x38, 0xd2, 0xfa, 0x85, 0xe4, 0xdd, - 0xe7, 0x6e, 0xcb, 0x0b, 0x34, 0x1e, 0xa8, 0xfd, 0xf4, 0xd2, 0xc3, 0xdd, - 0xe0, 0xa6, 0xb1, 0x78, 0x16, 0x85, 0x2b, 0x1b, 0x22, 0xa6, 0xd5, 0x93, - 0x4f, 0xa1, 0xd5, 0x10, 0x96, 0xab, 0x38, 0xa7, 0x3c, 0xf2, 0xbd, 0xd9, - 0x7c, 0x59, 0x71, 0x25, 0x6f, 0x7c, 0xce, 0x73, 0x8e, 0x4e, 0xfb, 0x5a, - 0x30, 0x24, 0x53, 0xc5, 0xa3, 0x20, 0x13, 0x03, 0xfc, 0x7a, 0xaf, 0x1f, - 0x71, 0x5d, 0x6b, 0xce, 0x2e, 0x92, 0x16, 0x4d, 0xab, 0x96, 0x10, 0xc0, - 0xf6, 0x3c, 0xfe, 0x51, 0x89, 0x4d, 0x39, 0x45, 0x2c, 0x92, 0x5a, 0x86, - 0x24, 0xce, 0xbc, 0x75, 0xc6, 0x7f, 0x0e, 0xc2, 0xd1, 0xe7, 0x6a, 0x75, - 0x30, 0x59, 0xfb, 0xbf, 0x6b, 0xcf, 0x60, 0x90, 0x07, 0x73, 0xb1, 0x47, - 0x6e, 0x5d, 0xcd, 0x44, 0xac, 0xee, 0x2a, 0xdb, 0x16, 0x5a, 0x1a, 0xaf, - 0xba, 0xf8, 0x64, 0xdd, 0xdd, 0xed, 0x46, 0x4b, 0x67, 0xf3, 0xf8, 0x2d, - 0x22, 0xe9, 0x25, 0x74, 0x4c, 0x70, 0xe0, 0x3d, 0xbc, 0x11, 0xd3, 0x56, - 0xec, 0x86, 0x39, 0x89, 0x4c, 0xf2, 0xbc, 0x39, 0xdc, 0xde, 0x5f, 0x3b, - 0x42, 0xcb, 0xf6, 0x0c, 0x49, 0x8c, 0x66, 0x76, 0x58, 0x28, 0xe8, 0x47, - 0x59, 0x40, 0x11, 0xef, 0xb5, 0x9d, 0x93, 0xe5, 0x39, 0x56, 0x62, 0x0d, - 0xd0, 0xdd, 0xbb, 0x51, 0xff, 0x87, 0xa3, 0xd1, 0x9e, 0x0e, 0x0c, 0xbd, - 0x8e, 0xfc, 0xa5, 0x44, 0xc7, 0x6d, 0x35, 0x1d, 0x69, 0x14, 0x5b, 0x0d, - 0x45, 0xff, 0x85, 0x2d, 0xd1, 0x14, 0xf4, 0x5e, 0x5b, 0x49, 0x85, 0xad, - 
0x69, 0xf1, 0x34, 0x9e, 0x7a, 0xf3, 0xed, 0x2d, 0xf2, 0x5f, 0x70, 0x5a, - 0xc1, 0xca, 0x63, 0xb5, 0xec, 0x49, 0xfc, 0x88, 0xcb, 0x0f, 0x81, 0x1d, - 0xd4, 0x2f, 0x18, 0xf6, 0xfe, 0x71, 0x51, 0xe2, 0x25, 0x71, 0x48, 0xa4, - 0xb2, 0x9f, 0x4f, 0xc0, 0xa5, 0x24, 0x12, 0x5b, 0xf8, 0xf2, 0xcf, 0x6e, - 0x52, 0x52, 0x6a, 0xee, 0x7d, 0xa5, 0x9b, 0xdb, 0x9c, 0xc9, 0x35, 0x30, - 0x1a, 0xf0, 0x7d, 0xcc, 0x98, 0x73, 0x09, 0x16, 0x8c, 0x05, 0x8d, 0x70, - 0xa3, 0x15, 0xd6, 0x7a, 0xa0, 0x7c, 0xd5, 0xcc, 0xd3, 0x29, 0x32, 0x2e, - 0xa5, 0xde, 0xf6, 0xd3, 0xa4, 0x03, 0x59, 0x6c, 0x05, 0x2d, 0x0e, 0x8b, - 0xb7, 0x1f, 0xa0, 0x57, 0x5c, 0x76, 0xde, 0x81, 0xcb, 0x64, 0xb9, 0x73, - 0xc1, 0x3b, 0x26, 0xba, 0x16, 0xdb, 0xe6, 0x40, 0x23, 0xa4, 0xe9, 0x24, - 0x48, 0xb8, 0x73, 0x23, 0x67, 0xbf, 0x26, 0xca, 0x95, 0x4f, 0xa0, 0x60, - 0x95, 0xa2, 0x0f, 0x29, 0xed, 0x5d, 0x71, 0x66, 0x94, 0xa3, 0xd0, 0x2a, - 0x4e, 0x17, 0x32, 0x18, 0xe6, 0xd6, 0x75, 0x84, 0xa5, 0x2a, 0x72, 0x18, - 0x60, 0x85, 0xde, 0x66, 0x22, 0x52, 0xf6, 0x45, 0xd6, 0xf0, 0xed, 0x93, - 0x0f, 0x5a, 0xa9, 0x12, 0x2a, 0xc4, 0xa8, 0x3d, 0x97, 0xc9, 0xc7, 0x84, - 0x71, 0x14, 0xb3, 0x54, 0xb6, 0xf7, 0x92, 0x7a, 0xc0, 0x6e, 0x02, 0xf7, - 0x48, 0xdb, 0x7c, 0xc1, 0x45, 0x21, 0xdb, 0x1b, 0x51, 0xc3, 0xea, 0xc0, - 0x19, 0x31, 0xe4, 0x6c, 0x20, 0x5f, 0x08, 0xe7, 0x88, 0xf7, 0xc0, 0x6e, - 0xee, 0x5f, 0x20, 0x33, 0x68, 0xef, 0xc5, 0x33, 0x1b, 0x40, 0x66, 0xc5, - 0xa3, 0x68, 0xdb, 0xbc, 0x8a, 0xb7, 0x54, 0xdb, 0xc7, 0xc5, 0x2c, 0x42, - 0x65, 0x51, 0xab, 0x56, 0x94, 0x73, 0xec, 0xd9, 0x95, 0xfa, 0x6a, 0x56, - 0xef, 0x22, 0x95, 0xa4, 0x75, 0x46, 0xee, 0x60, 0x8b, 0x25, 0xa6, 0x92, - 0x0a, 0x8e, 0xc1, 0x39, 0x97, 0x69, 0xa9, 0x19, 0x97, 0xf1, 0x0f, 0x61, - 0xc2, 0x40, 0x7d, 0x62, 0xe9, 0x5e, 0x22, 0x1f, 0x27, 0xe5, 0xc7, 0xe7, - 0xa4, 0x35, 0x5d, 0x90, 0xc7, 0x38, 0x38, 0x2d, 0xb0, 0x1e, 0x29, 0x0f, - 0x4f, 0x08, 0x8b, 0xdd, 0x69, 0x3c, 0x5c, 0x03, 0xbe, 0x9a, 0x76, 0xba, - 0x91, 0xf5, 0x57, 0x07, 0x39, 0xfe, 0x09, 0xfc, 0x01, 0x7b, 0x37, 0xc4, - 0x73, 0x7f, 0x76, 0x50, 0x76, 0xae, 0x6e, 0x4b, 0x22, 0x2c, 0x3b, 0xe7, - 0x77, 0x19, 0x9a, 0x92, 0x26, 0xdf, 0xc4, 0xe6, 0xd8, 0x57, 0xc1, 0x7f, - 0x65, 0x0b, 0xfb, 0xfa, 0xdd, 0xd2, 0x8c, 0xc7, 0xb1, 0x72, 0x2a, 0xb2, - 0x5a, 0xfa, 0xb2, 0x84, 0xb1, 0xec, 0x79, 0x9e, 0xde, 0xd8, 0x2f, 0xdf, - 0x3b, 0x39, 0x0b, 0xac, 0xfa, 0xb8, 0x07, 0x38, 0xff, 0x2e, 0x22, 0x2b, - 0xc9, 0x31, 0x3b, 0x09, 0x05, 0xd2, 0x06, 0xc4, 0x2d, 0x22, 0x1c, 0x21, - 0x70, 0x03, 0x93, 0xd1, 0x3a, 0x8d, 0x94, 0x60, 0xfe, 0x99, 0x13, 0xc3, - 0x00, 0x03, 0x41, 0xfa, 0x50, 0x79, 0x31, 0xeb, 0xf0, 0xf4, 0x06, 0x7a, - 0x19, 0xe8, 0x90, 0xdf, 0x61, 0x4d, 0x5f, 0xe3, 0x99, 0x1b, 0xca, 0xbf, - 0xcf, 0xae, 0xca, 0xfa, 0x84, 0x63, 0x88, 0x56, 0x1d, 0x52, 0x5a, 0x21, - 0xf9, 0xcd, 0xa3, 0x30, 0x16, 0xb9, 0x0d, 0xe1, 0x87, 0x08, 0x78, 0xa2, - 0xdb, 0x7e, 0x16, 0x82, 0x48, 0x48, 0x17, 0x1a, 0xa8, 0x3f, 0xc7, 0x4d, - 0xfd, 0x99, 0x2b, 0x36, 0xbf, 0x08, 0xb9, 0xeb, 0xa6, 0xbf, 0xb6, 0xa0, - 0x9e, 0x26, 0x15, 0xac, 0xd2, 0x65, 0xc9, 0x36, 0x41, 0xe3, 0x59, 0x4e, - 0xdc, 0x7b, 0x58, 0x3b, 0x47, 0x0b, 0xc9, 0xf3, 0xb3, 0xf9, 0x81, 0x33, - 0x39, 0xca, 0xf8, 0x97, 0x2d, 0x9b, 0x24, 0x33, 0x69, 0xbe, 0x1b, 0x81, - 0x59, 0x59, 0x17, 0xed, 0x7d, 0x5b, 0xbe, 0xda, 0xeb, 0x4e, 0x5d, 0x5d, - 0x70, 0x13, 0x3c, 0x4b, 0x4a, 0xfc, 0xa4, 0xbe, 0xa0, 0x5d, 0xa2, 0xed, - 0xe8, 0x8d, 0xf8, 0xf2, 0xa5, 0xdd, 0xd4, 0x49, 0x45, 0x04, 0xef, 0x18, - 0x9f, 0xa1, 0xf7, 0xc4, 0x3b, 0xc2, 0x6b, 0xe0, 0x45, 0xa8, 0x76, 0x39, - 0x49, 0x32, 0xec, 0xc3, 0xcb, 0x45, 0x46, 0xd2, 0x4b, 0x3a, 0x55, 0xe5, - 
0xce, 0x08, 0xc4, 0x84, 0xe5, 0xd9, 0xb3, 0xf3, 0xc4, 0xa8, 0xe9, 0x88, - 0x83, 0xd5, 0x56, 0xe1, 0xa6, 0xef, 0x41, 0x55, 0xb0, 0x3f, 0xa3, 0xc1, - 0xbe, 0x3b, 0x83, 0xd6, 0x92, 0x90, 0x38, 0xd3, 0xf3, 0x75, 0xf6, 0x49, - 0x95, 0xee, 0xa9, 0xed, 0xaa, 0xf8, 0xb9, 0x14, 0x0e, 0x6a, 0x48, 0x9d, - 0xc5, 0x48, 0x3b, 0x5e, 0x61, 0xd3, 0x8c, 0x4a, 0x10, 0x12, 0x7c, 0x0a, - 0xf7, 0xaf, 0x62, 0x2d, 0xd3, 0x89, 0x8d, 0x75, 0x19, 0x6b, 0x62, 0x4b, - 0x1a, 0x04, 0xc7, 0xd3, 0x32, 0x17, 0x2f, 0x5f, 0x29, 0xfa, 0xb1, 0x8d, - 0x78, 0xe7, 0x27, 0xf6, 0x67, 0x7e, 0x17, 0xa3, 0x18, 0xdc, 0x13, 0x08, - 0x1e, 0x4b, 0xc7, 0x8e, 0xf6, 0xba, 0x90, 0xb3, 0x32, 0x42, 0x37, 0x6b, - 0x60, 0xa9, 0x23, 0xb5, 0x89, 0x57, 0x7b, 0xdb, 0x98, 0x35, 0x1f, 0x95, - 0x86, 0xa5, 0x83, 0x36, 0xd1, 0x8c, 0x8e, 0xc0, 0x77, 0x5c, 0x40, 0x8e, - 0xec, 0xdf, 0x25, 0x69, 0x0a, 0x83, 0x8f, 0xdf, 0x91, 0x52, 0x31, 0xab, - 0xd5, 0x61, 0x37, 0xbd, 0x83, 0x1d, 0x4c, 0x8b, 0xa1, 0x4a, 0x81, 0x8b, - 0xa0, 0xf4, 0x41, 0xbd, 0x54, 0x36, 0x36, 0x56, 0x6d, 0x4c, 0xe7, 0xd9, - 0xc7, 0x09, 0xd9, 0x4b, 0xf0, 0x54, 0x45, 0x3c, 0x62, 0x47, 0x17, 0x54, - 0x1f, 0x55, 0x2f, 0x74, 0xdc, 0x11, 0xe9, 0xa3, 0xb5, 0x75, 0xe9, 0x10, - 0xde, 0x62, 0xa9, 0x24, 0x39, 0xd4, 0x17, 0xbb, 0x15, 0xe4, 0x48, 0x09, - 0x26, 0x6a, 0xbd, 0x3b, 0x10, 0xa1, 0x55, 0xe5, 0x99, 0x53, 0x1e, 0xd2, - 0xee, 0x7c, 0x54, 0xd8, 0x06, 0x8b, 0x1e, 0xe7, 0x3f, 0x08, 0x38, 0x9b, - 0x2e, 0x41, 0xdf, 0x0b, 0x7e, 0x83, 0x7f, 0x04, 0x38, 0xa5, 0x1f, 0x46, - 0x8b, 0x94, 0x28, 0x9f, 0xb8, 0x8c, 0x41, 0xfe, 0x96, 0xe2, 0x24, 0xd1, - 0x97, 0xa4, 0xcb, 0xba, 0xfa, 0x19, 0xc9, 0x57, 0x30, 0x0f, 0x88, 0x58, - 0xa9, 0x67, 0x31, 0x74, 0x51, 0x34, 0x03, 0xbc, 0xff, 0x3b, 0x12, 0x61, - 0x84, 0x63, 0x74, 0xec, 0x4d, 0xda, 0xa3, 0x56, 0xc3, 0xe5, 0x5e, 0x4a, - 0x03, 0x26, 0x88, 0x1a, 0x1d, 0x7f, 0xe8, 0x3f, 0x61, 0x78, 0xb6, 0xc5, - 0x66, 0xb7, 0xb4, 0xc1, 0xe7, 0x82, 0xc1, 0x44, 0xdf, 0xf9, 0x30, 0x30, - 0xe1, 0xd0, 0xf8, 0xf5, 0x40, 0x5a, 0x72, 0x29, 0xef, 0x30, 0xe1, 0x01, - 0xca, 0x1b, 0xb0, 0xa6, 0xa3, 0x17, 0x2b, 0x58, 0x03, 0xda, 0x25, 0x0f, - 0xdc, 0x49, 0x7c, 0xc5, 0x8f, 0x2d, 0x83, 0xca, 0x43, 0x08, 0xc0, 0x36, - 0x70, 0x1e, 0x42, 0xfd, 0xac, 0x4d, 0x31, 0xcf, 0x68, 0x4a, 0xda, 0xd8, - 0xcb, 0xee, 0xaa, 0xfc, 0xcf, 0xcc, 0xe6, 0xb2, 0x77, 0x8b, 0x83, 0x5b, - 0xd5, 0x3d, 0x55, 0xba, 0x03, 0x45, 0xce, 0x51, 0x78, 0x36, 0xcb, 0xcd, - 0x9a, 0x0f, 0x58, 0xbe, 0x15, 0x10, 0xdb, 0x3f, 0x1d, 0x28, 0x27, 0x11, - 0x69, 0xca, 0x95, 0x68, 0xa8, 0xc8, 0xff, 0x0c, 0x3f, 0xd5, 0x11, 0x91, - 0x35, 0x45, 0x35, 0x9d, 0x1c, 0x58, 0xa2, 0xe5, 0xab, 0x83, 0x95, 0x10, - 0x44, 0xd4, 0xc0, 0x27, 0xf4, 0xc2, 0x72, 0x0f, 0x1a, 0x3d, 0x1c, 0xf2, - 0x7f, 0xb9, 0x54, 0xf2, 0x41, 0x24, 0xa8, 0x67, 0x30, 0xa0, 0x57, 0x67, - 0x00, 0xa8, 0x06, 0x60, 0xc3, 0x74, 0x6d, 0x54, 0x90, 0x5e, 0xad, 0x71, - 0x41, 0x50, 0xab, 0x9d, 0xba, 0x34, 0x1a, 0xfd, 0x19, 0x21, 0x0e, 0x87, - 0xb7, 0x22, 0xe6, 0xca, 0xb9, 0x0d, 0x3c, 0x4f, 0xad, 0x16, 0xf1, 0xa5, - 0x6d, 0xba, 0x6d, 0x7b, 0xbe, 0x7b, 0xe3, 0x95, 0xec, 0x1b, 0x8b, 0x6e, - 0xb0, 0xdc, 0x5c, 0xfd, 0x31, 0x73, 0x85, 0x02, 0x63, 0xc6, 0xcc, 0x04, - 0x29, 0xa5, 0xf4, 0x1f, 0xcb, 0x90, 0xf7, 0x83, 0x0d, 0x36, 0xbf, 0x31, - 0xc0, 0xfc, 0x26, 0x15, 0x87, 0xc8, 0x15, 0x88, 0xc9, 0x79, 0x11, 0x67, - 0x23, 0x53, 0xca, 0x03, 0x7a, 0x02, 0xe5, 0xfc, 0xb3, 0x38, 0xf3, 0x5d, - 0xfc, 0x91, 0x6f, 0x59, 0x26, 0xae, 0xd8, 0x45, 0xfa, 0xc4, 0x5b, 0xa2, - 0xfb, 0x2c, 0xc5, 0x36, 0xc6, 0x0d, 0x7b, 0x4e, 0xd2, 0x7f, 0x61, 0xc5, - 0xcc, 0x74, 0xd3, 0x41, 0xd4, 0x8a, 0xaf, 0xcb, 0x32, 0x50, 0xca, 0xeb, - 
0x59, 0x0a, 0x05, 0x25, 0xe0, 0x5f, 0x30, 0x2b, 0x5d, 0x9b, 0xf7, 0xe8, - 0x14, 0x14, 0xb5, 0xfe, 0xd5, 0x2f, 0x94, 0x84, 0x5b, 0xc7, 0x4f, 0x82, - 0x01, 0x50, 0xbf, 0x54, 0xe2, 0x7d, 0xeb, 0x0c, 0x85, 0xc8, 0x99, 0x45, - 0x50, 0x8e, 0x4e, 0x10, 0x12, 0x01, 0x17, 0x41, 0xf3, 0x21, 0x4a, 0xee, - 0xaf, 0x0f, 0x76, 0x44, 0xe2, 0x8e, 0xf8, 0x36, 0x25, 0xab, 0x0d, 0x8f, - 0xb1, 0x0a, 0xbf, 0x63, 0x0e, 0xf2, 0x0c, 0x9d, 0x39, 0xa1, 0x98, 0x98, - 0x69, 0x91, 0xd1, 0x9b, 0xe8, 0xcf, 0x16, 0x65, 0x02, 0xc9, 0x67, 0x72, - 0x71, 0x7c, 0xfb, 0x41, 0x2d, 0xe4, 0xd3, 0xfb, 0x44, 0x8a, 0x7a, 0x88, - 0x32, 0x62, 0x26, 0x63, 0xfe, 0x5b, 0x0c, 0x4f, 0x6c, 0xad, 0x2f, 0x64, - 0x6f, 0xc9, 0xda, 0x95, 0x10, 0xbe, 0xd1, 0xfa, 0x8b, 0x67, 0x64, 0x35, - 0x2d, 0xed, 0xca, 0xf3, 0x12, 0xb7, 0x06, 0xc3, 0xa9, 0x8e, 0x3f, 0x09, - 0x4d, 0x1f, 0x50, 0x3a, 0x97, 0xb7, 0xa7, 0xce, 0x4d, 0x46, 0xf1, 0x61, - 0xc1, 0x06, 0x95, 0x0d, 0x07, 0xa2, 0xbc, 0xed, 0xeb, 0x45, 0xb4, 0x69, - 0x05, 0x7a, 0x30, 0x47, 0xa3, 0xbf, 0x81, 0xa9, 0xa7, 0xf0, 0x53, 0x36, - 0x31, 0x37, 0x13, 0xe5, 0x0e, 0xd6, 0xe6, 0xc7, 0x17, 0x17, 0x21, 0x6d, - 0x36, 0xd0, 0xf6, 0x2a, 0xea, 0x2d, 0x32, 0x0e, 0x90, 0x03, 0x30, 0x4d, - 0x30, 0x31, 0xaa, 0x79, 0x2d, 0xae, 0x2e, 0xb0, 0x13, 0xad, 0x63, 0x69, - 0x67, 0xd8, 0xf3, 0x6e, 0xa4, 0x34, 0xcf, 0x02, 0x10, 0xdd, 0x76, 0xfa, - 0xa7, 0xb0, 0x92, 0xea, 0x47, 0xbd, 0xff, 0xf9, 0xac, 0x8a, 0x1f, 0x31, - 0xf8, 0x05, 0xd4, 0xce, 0x23, 0xad, 0x32, 0x8c, 0x6c, 0x92, 0x85, 0xb9, - 0x74, 0xa6, 0xab, 0x6e, 0x76, 0xfd, 0x3e, 0x8a, 0xac, 0xa3, 0xd1, 0xb7, - 0x40, 0x53, 0x87, 0x28, 0xfc, 0xbc, 0x8a, 0x52, 0x8e, 0x2e, 0x59, 0x2c, - 0x5f, 0x3f, 0xcb, 0xd8, 0xbe, 0x37, 0xfd, 0xdc, 0xc0, 0x34, 0x85, 0x67, - 0x28, 0x9f, 0x1d, 0x05, 0x05, 0x94, 0xed, 0x6f, 0x54, 0x7a, 0x51, 0x9a, - 0xaa, 0xca, 0xe1, 0x41, 0x10, 0xf0, 0x9d, 0x38, 0x9c, 0x5e, 0x95, 0xe3, - 0x7e, 0x62, 0xe2, 0x31, 0x81, 0x28, 0x4a, 0x3c, 0x5e, 0x04, 0x11, 0xe2, - 0x6a, 0x45, 0x6f, 0x68, 0x96, 0x5b, 0xbf, 0x22, 0xd8, 0x29, 0x91, 0x76, - 0xe1, 0xb2, 0x5f, 0xfc, 0x89, 0x90, 0x87, 0xf8, 0xb8, 0x3f, 0xd5, 0x11, - 0xe7, 0x36, 0x47, 0x71, 0xb9, 0x52, 0x97, 0x8e, 0x62, 0x8b, 0x05, 0x31, - 0xe5, 0xd9, 0xa2, 0xc3, 0x1a, 0xb5, 0xda, 0xc7, 0xa5, 0x37, 0x06, 0x67, - 0x41, 0x1f, 0x6e, 0xa3, 0xc2, 0xb4, 0x96, 0x64, 0xfc, 0x46, 0x85, 0x95, - 0x4e, 0xd8, 0x2a, 0x4b, 0xaa, 0x1e, 0xec, 0xd5, 0xed, 0x81, 0x23, 0x68, - 0x0f, 0x5d, 0x0b, 0x95, 0x29, 0xd4, 0x36, 0x4d, 0x8c, 0x32, 0x73, 0x6a, - 0xb7, 0xad, 0xb8, 0x9c, 0xad, 0x76, 0x09, 0xad, 0xb9, 0xea, 0x2d, 0x17, - 0x3c, 0x33, 0x87, 0x7f, 0x62, 0x74, 0x77, 0xc9, 0xd6, 0x3d, 0x17, 0xbc, - 0xff, 0x57, 0x10, 0xec, 0x7a, 0xb7, 0x89, 0x05, 0x26, 0xf1, 0xb2, 0x53, - 0xa1, 0x91, 0xc5, 0x2a, 0xfb, 0x5a, 0xce, 0x5d, 0xd1, 0x6b, 0xbc, 0xb7, - 0x39, 0x09, 0x43, 0xdf, 0x20, 0xd3, 0xc1, 0x74, 0x8d, 0xf4, 0x0b, 0x2a, - 0xc7, 0xe8, 0xa1, 0x5f, 0xb2, 0xfe, 0x1a, 0x96, 0x3a, 0x92, 0xbc, 0x8f, - 0x85, 0xe2, 0x22, 0x73, 0x3f, 0x49, 0xb3, 0x6b, 0x90, 0xbd, 0xcb, 0x3f, - 0x36, 0x6c, 0x3d, 0xe3, 0x00, 0x00, 0x00, 0x00, 0x56, 0xd1, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x1f, 0x05, 0x81, 0x3f, - 0x25, 0x68, 0xde, 0x72, 0x88, 0x26, 0x66, 0x2d, 0xe4, 0xc8, 0x81, 0xf8, - 0x5d, 0x98, 0xa2, 0xc2, 0x02, 0x62, 0x63, 0x47, 0xe6, 0x61, 0x7f, 0xee, - 0xca, 0x3f, 0x81, 0xd7, 0x1e, 0xa9, 0xbf, 0x66, 0x59, 0x7f, 0xc3, 0x35, - 0x03, 0xae, 0xe5, 0xf2, 0x4d, 0x81, 0x82, 0x78, 0x5e, 0xaf, 0xaa, 0xd1, - 0x27, 0x41, 0x19, 0x93, 0xa8, 0x9b, 0x78, 0x4e, 0x95, 0x89, 0x7f, 0xce, - 0x49, 0xd0, 0x45, 0xb5, 0x7f, 0x1d, 0xe9, 0xee, 0x7f, 0x91, 0xf4, 0x0a, - 
0x67, 0x7d, 0x75, 0xff, 0x38, 0x81, 0x27, 0x90, 0x14, 0xa5, 0x99, 0x40, - 0x5b, 0xe6, 0x9a, 0x81, 0x75, 0x22, 0x5f, 0x18, 0x81, 0x34, 0xb7, 0x54, - 0x2e, 0x8d, 0x81, 0x36, 0x0e, 0x5e, 0xc0, 0x5f, 0xd4, 0xc6, 0x34, 0x81, - 0xc8, 0xb9, 0xe2, 0xa9, 0x77, 0x81, 0x44, 0xb4, 0x06, 0x24, 0x81, 0x74, - 0x1c, 0xeb, 0xfb, 0xdd, 0x25, 0x81, 0x14, 0x09, 0x2d, 0xba, 0x11, 0x4b, - 0x07, 0x13, 0xf1, 0xae, 0x81, 0xaf, 0xa3, 0x87, 0x00, 0x00, 0x00, 0x00, - 0xf6, 0xd1, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, - 0x8a, 0x29, 0x03, 0xe6, 0x24, 0x2a, 0xd6, 0x21, 0xb6, 0xb1, 0x2d, 0x3a, - 0xff, 0xd6, 0x27, 0xd7, 0x18, 0x42, 0xc1, 0xb4, 0xf8, 0xfd, 0xdf, 0x45, - 0x09, 0x91, 0xcb, 0xfe, 0xe9, 0xb5, 0x24, 0xf1, 0xc0, 0x69, 0xd0, 0x64, - 0xa8, 0xeb, 0x12, 0x71, 0xe3, 0xb4, 0xbe, 0xb4, 0x93, 0xbf, 0x8a, 0x8b, - 0xf3, 0x4d, 0x13, 0x3b, 0x6f, 0x6f, 0x32, 0x12, 0x98, 0x95, 0xb9, 0x63, - 0xcd, 0xa5, 0x23, 0xa4, 0xb8, 0x2e, 0x74, 0x75, 0xbc, 0xe4, 0xc7, 0x46, - 0x96, 0xd4, 0x47, 0xa0, 0x65, 0xec, 0xea, 0xcf, 0xd0, 0xdc, 0xe9, 0x8b, - 0xcc, 0x1d, 0x2f, 0x0d, 0x0a, 0x9c, 0x6e, 0x99, 0x97, 0x97, 0xcc, 0x00, - 0xd2, 0x8e, 0xbc, 0x3c, 0x9a, 0xf1, 0x32, 0x0e, 0xf3, 0xd6, 0x27, 0x1c, - 0xea, 0xab, 0xca, 0x4d, 0x69, 0x32, 0x30, 0x5f, 0x18, 0xd7, 0xb7, 0x4a, - 0xcb, 0x8e, 0xb2, 0x96, 0x39, 0xa3, 0xc7, 0x42, 0xca, 0x60, 0x9b, 0xad, - 0x8e, 0xb7, 0x54, 0x32, 0xea, 0xfd, 0x58, 0xfa, 0xf8, 0x02, 0xef, 0x2f, - 0xec, 0x3c, 0x2a, 0x1a, 0x6a, 0x08, 0xa4, 0x4b, 0xec, 0x30, 0x90, 0xaf, - 0x13, 0x98, 0xcd, 0x48, 0xfd, 0x5f, 0x56, 0x68, 0x17, 0x9e, 0x87, 0xb1, - 0x2b, 0x16, 0xd3, 0x3c, 0xe0, 0xe8, 0x0e, 0xa6, 0xc4, 0x24, 0xd3, 0x05, - 0x75, 0xda, 0x22, 0x44, 0xb5, 0x41, 0xd2, 0xa5, 0x99, 0xf1, 0x5e, 0xbe, - 0x15, 0xb7, 0x33, 0x54, 0x9a, 0x97, 0x5b, 0x35, 0x77, 0x2b, 0x18, 0x46, - 0x2f, 0x92, 0xc5, 0x97, 0x2d, 0x4c, 0xa6, 0xf8, 0x9e, 0xc3, 0xe0, 0x0a, - 0x52, 0xf9, 0x97, 0xc7, 0xd6, 0x36, 0xdd, 0x38, 0xaa, 0xf3, 0x05, 0x30, - 0xc3, 0xe5, 0xaf, 0x54, 0xdc, 0xc4, 0xf2, 0x01, 0x9e, 0xe6, 0xc1, 0x89, - 0xee, 0xd8, 0x5f, 0xfe, 0xf0, 0x70, 0x3c, 0xc4, 0x40, 0xa4, 0xd4, 0xee, - 0xaf, 0x3d, 0xe6, 0xcd, 0x31, 0x16, 0x31, 0x3b, 0xa0, 0x0e, 0xc4, 0x71, - 0xbf, 0xbd, 0x39, 0x89, 0x0f, 0x36, 0xba, 0xd8, 0xa2, 0x49, 0x01, 0xab, - 0xf4, 0x07, 0x99, 0xc7, 0xb1, 0x0c, 0x33, 0x9d, 0x71, 0xf1, 0x15, 0x4b, - 0x60, 0xe0, 0xed, 0x59, 0x0a, 0x34, 0xd9, 0xa2, 0x45, 0x99, 0x4a, 0x60, - 0xd3, 0xdc, 0x37, 0x56, 0x32, 0x4c, 0xea, 0xdc, 0xcf, 0xe6, 0x22, 0x27, - 0x17, 0xea, 0x75, 0x3f, 0x69, 0xd4, 0xcf, 0x53, 0x92, 0x98, 0xf4, 0xfe, - 0x13, 0xa8, 0xe2, 0xb2, 0x48, 0x5f, 0x64, 0xab, 0x2b, 0x61, 0x97, 0xf5, - 0xc5, 0xb6, 0xef, 0x32, 0x4e, 0x47, 0x26, 0x42, 0x48, 0x9c, 0x5b, 0x24, - 0xa3, 0xcb, 0x70, 0xc7, 0x31, 0x6c, 0xc8, 0x4d, 0x5c, 0x02, 0xca, 0x71, - 0x1e, 0x56, 0xdb, 0x27, 0x66, 0x5d, 0x4f, 0x0b, 0x09, 0x57, 0xbe, 0x72, - 0x17, 0x3b, 0xce, 0xdd, 0xd2, 0x20, 0x13, 0x67, 0x32, 0x04, 0xee, 0xc4, - 0x66, 0x23, 0x0e, 0x97, 0x5e, 0x21, 0x30, 0xb2, 0xe4, 0x16, 0x06, 0x57, - 0xc3, 0x9b, 0x29, 0x5b, 0x76, 0xd0, 0x36, 0xac, 0xe6, 0xa2, 0x91, 0x57, - 0x96, 0x4e, 0x1c, 0x6f, 0x4a, 0x03, 0x50, 0x55, 0x6d, 0xaf, 0x9a, 0x29, - 0xc9, 0x61, 0x6c, 0x18, 0x4c, 0xb9, 0xd5, 0x41, 0xf8, 0x75, 0x2b, 0xc3, - 0x0e, 0x69, 0x9f, 0x45, 0x93, 0x2f, 0xa6, 0xf9, 0x30, 0x65, 0x05, 0x13, - 0xe3, 0x00, 0x54, 0x0e, 0xa4, 0xb5, 0x89, 0x6d, 0x4d, 0x11, 0x3d, 0x2a, - 0x29, 0x99, 0xd9, 0xdf, 0x75, 0xce, 0x01, 0x21, 0xbc, 0x26, 0xb3, 0x22, - 0xf9, 0xb0, 0x45, 0x5c, 0xf8, 0xea, 0xb2, 0x08, 0x1a, 0xf7, 0xa0, 0x70, - 0x65, 0xa8, 0xab, 0xe1, 0x92, 0xcc, 0xcc, 0x1f, 0x0e, 0x36, 0x60, 0xb7, - 
0xea, 0xcb, 0x3d, 0xf6, 0x98, 0xbf, 0xcd, 0x00, 0xc9, 0x16, 0x1e, 0xdb, - 0x58, 0x24, 0xb1, 0xd8, 0xaf, 0x01, 0x00, 0xfa, 0x15, 0xf4, 0x37, 0x05, - 0xd7, 0x17, 0x2a, 0xd2, 0xe8, 0xe4, 0x0c, 0x50, 0xfa, 0xe8, 0xd6, 0x99, - 0xa9, 0x58, 0x61, 0x38, 0xee, 0x22, 0x3c, 0x53, 0xcf, 0x64, 0x8e, 0xad, - 0x4d, 0xd6, 0xc3, 0xc3, 0xdd, 0xb0, 0xb3, 0xf7, 0xdd, 0x37, 0xfd, 0xf3, - 0x2b, 0x6a, 0xe2, 0xd4, 0xfc, 0x0c, 0x74, 0xca, 0x37, 0x2f, 0xd2, 0xf8, - 0x5b, 0xf1, 0x8c, 0x32, 0xa0, 0xdc, 0x2c, 0xa8, 0x36, 0x2f, 0xbe, 0x45, - 0x9b, 0x42, 0x95, 0x15, 0x5e, 0x08, 0xb1, 0x61, 0xec, 0xa2, 0xdf, 0x5f, - 0xca, 0xf8, 0x62, 0x73, 0xfd, 0x66, 0xc8, 0x51, 0x2a, 0x69, 0x3c, 0x8f, - 0x75, 0xa4, 0x6f, 0xbe, 0xc1, 0x5c, 0x66, 0xe2, 0x60, 0x92, 0xd7, 0x0e, - 0xee, 0x1b, 0xc7, 0x39, 0x8b, 0x56, 0x6c, 0xc6, 0x20, 0xfa, 0xec, 0x96, - 0xa5, 0x0f, 0x74, 0x42, 0x32, 0x12, 0x11, 0xdf, 0x02, 0xfe, 0x42, 0x1c, - 0xfe, 0xf1, 0x72, 0xaf, 0x47, 0x3b, 0x62, 0xe3, 0x27, 0x29, 0xf0, 0xec, - 0x39, 0xd2, 0xdd, 0xb6, 0xe9, 0xbe, 0x5f, 0x66, 0x67, 0x6c, 0xc9, 0xa1, - 0xf0, 0x25, 0x9a, 0x1b, 0xa8, 0xa0, 0x15, 0xcb, 0x61, 0x98, 0x98, 0xfd, - 0xef, 0xba, 0x74, 0x9b, 0x54, 0xf3, 0x6d, 0xe1, 0xa4, 0xcf, 0xb5, 0xe7, - 0xba, 0x0f, 0xd1, 0x41, 0xd8, 0x63, 0x94, 0x09, 0xcd, 0x4f, 0xb1, 0x31, - 0x49, 0x5e, 0x54, 0xb1, 0x28, 0x39, 0x8e, 0x13, 0x48, 0x2e, 0x20, 0xb0, - 0xf7, 0x18, 0x9a, 0xea, 0xf2, 0x9b, 0xde, 0x8f, 0x16, 0xc8, 0x9e, 0x31, - 0xca, 0x94, 0x28, 0x26, 0x0d, 0x8c, 0x0f, 0x09, 0x69, 0xc5, 0x2a, 0x38, - 0xae, 0x6b, 0xfb, 0x4f, 0xbb, 0xf4, 0x14, 0xea, 0x8d, 0x13, 0xc0, 0x09, - 0xe2, 0xfb, 0xfb, 0x09, 0xa1, 0xfc, 0x49, 0xff, 0x0f, 0x52, 0x3e, 0xe8, - 0xda, 0xfe, 0xe1, 0x67, 0x8f, 0x21, 0xcf, 0xaf, 0xb7, 0xe2, 0xcf, 0x09, - 0x15, 0x10, 0x51, 0x72, 0x8f, 0x42, 0x09, 0x9d, 0xea, 0x27, 0x2d, 0x25, - 0x9f, 0x54, 0x50, 0xfa, 0xdf, 0x9f, 0x41, 0xe8, 0xd2, 0x66, 0xd8, 0x28, - 0xfb, 0x8b, 0xe4, 0x42, 0x03, 0x92, 0xf9, 0xcd, 0xcc, 0xb0, 0xc0, 0x52, - 0x53, 0x6d, 0xcd, 0xed, 0x16, 0xad, 0x3c, 0x3d, 0xf9, 0x3b, 0x05, 0xbb, - 0xac, 0x9e, 0xa3, 0x4b, 0x17, 0xb4, 0xc7, 0xdd, 0xd4, 0xd3, 0x0c, 0x10, - 0x0d, 0xd8, 0x9c, 0xdb, 0xa4, 0x60, 0x06, 0x89, 0x4b, 0x06, 0x4c, 0x9f, - 0xc4, 0x47, 0xc8, 0xaf, 0xab, 0x02, 0x23, 0x89, 0x6e, 0xf2, 0x9d, 0x2b, - 0x6b, 0x9a, 0xa4, 0xee, 0x16, 0x0b, 0x3c, 0x76, 0xd4, 0xf0, 0x17, 0x90, - 0xca, 0xf5, 0xc8, 0xbf, 0xcb, 0xb1, 0x02, 0x69, 0x34, 0x71, 0x59, 0x5d, - 0x0e, 0x56, 0xd8, 0x41, 0x0a, 0xa5, 0x0a, 0x16, 0xbc, 0x93, 0x63, 0xf9, - 0xd9, 0xab, 0x3e, 0x75, 0x1e, 0xd3, 0xf3, 0x56, 0xf5, 0x14, 0xee, 0x65, - 0xf3, 0x2f, 0x72, 0x03, 0xcb, 0x69, 0x90, 0x91, 0x0d, 0x31, 0x8e, 0x3e, - 0xe9, 0xb0, 0xe6, 0x2e, 0x37, 0x5d, 0xb0, 0x38, 0x52, 0xe6, 0x23, 0x24, - 0x36, 0xb2, 0xe9, 0xa5, 0xa0, 0xae, 0xed, 0xfd, 0x95, 0xa5, 0xcf, 0x4a, - 0xe3, 0xbd, 0xe7, 0x29, 0xd0, 0x57, 0x3e, 0xf1, 0xdf, 0xc8, 0xc7, 0x26, - 0xf6, 0xc7, 0x4b, 0xc8, 0x6a, 0x4a, 0xed, 0x49, 0x60, 0x2d, 0x1c, 0xe3, - 0x8b, 0x10, 0x24, 0xfc, 0xef, 0xbb, 0x1e, 0x24, 0xbb, 0x40, 0xeb, 0x99, - 0xba, 0xe1, 0x4a, 0xd4, 0x1f, 0x69, 0x47, 0xa4, 0x8f, 0x48, 0x05, 0x17, - 0xcb, 0xee, 0x55, 0xca, 0xe5, 0xe3, 0x60, 0xec, 0xfa, 0xe6, 0xd1, 0x28, - 0xc5, 0xa8, 0x04, 0xd8, 0xce, 0x13, 0x2b, 0x99, 0x2b, 0xc7, 0x94, 0x9d, - 0xda, 0xd7, 0x6f, 0x31, 0xfe, 0xee, 0x6c, 0x9b, 0xf1, 0x70, 0xd2, 0xee, - 0xc4, 0xba, 0xb7, 0xbe, 0xd3, 0x37, 0xdc, 0x43, 0x4e, 0x30, 0x4a, 0x67, - 0xf2, 0x45, 0x29, 0xe1, 0x8b, 0xb8, 0x6d, 0xca, 0xec, 0xb9, 0xd6, 0xd3, - 0xdd, 0xcb, 0xde, 0xdb, 0xa9, 0x4d, 0xdd, 0x3d, 0x41, 0xae, 0x99, 0x89, - 0xce, 0x70, 0x50, 0x61, 0x07, 0xf3, 0xca, 0x24, 0x56, 0x76, 0x3f, 0xe0, - 
0x6e, 0xbe, 0xa7, 0xc6, 0xac, 0x6c, 0xf1, 0x8c, 0xa2, 0x0e, 0xc4, 0x2a, - 0x48, 0x30, 0x8b, 0xc9, 0xc0, 0x5a, 0xb2, 0x2b, 0xbd, 0xa2, 0xcc, 0xf7, - 0x25, 0x16, 0xc3, 0xde, 0x1b, 0x8d, 0x23, 0x8c, 0xb6, 0xc4, 0xaa, 0x4a, - 0x0b, 0x66, 0x25, 0x35, 0xb3, 0x9a, 0x74, 0x27, 0x63, 0xea, 0xef, 0x92, - 0x12, 0x8c, 0x58, 0xd9, 0x3a, 0x55, 0xd6, 0x61, 0x29, 0x9f, 0xbc, 0x28, - 0xbd, 0x30, 0xcd, 0x43, 0xe6, 0x36, 0x36, 0x66, 0x20, 0x8c, 0x9e, 0x23, - 0xfe, 0x6d, 0xf0, 0xbc, 0x61, 0xcd, 0x58, 0xd8, 0xe0, 0x2e, 0xe4, 0xcf, - 0x61, 0xf7, 0xd5, 0x6b, 0x54, 0x33, 0xb3, 0x2c, 0x60, 0xa8, 0x59, 0x21, - 0x5d, 0xaa, 0x65, 0x9e, 0xdc, 0xa3, 0xc9, 0xc4, 0x9d, 0x4d, 0x95, 0x29, - 0xf6, 0x2b, 0xcd, 0xc9, 0xb9, 0x9d, 0x46, 0xa0, 0x89, 0xf4, 0x4e, 0x52, - 0x55, 0xe2, 0x13, 0x98, 0xf0, 0xef, 0x27, 0xc3, 0xc9, 0xd1, 0xe1, 0xee, - 0x07, 0x1b, 0x9d, 0x8a, 0x5b, 0x9d, 0x06, 0x26, 0x61, 0x2a, 0x55, 0x6f, - 0x54, 0x22, 0xd5, 0x06, 0x20, 0xed, 0x06, 0x4d, 0xa2, 0xb3, 0xaa, 0x4f, - 0x1f, 0x3e, 0xd2, 0x0d, 0x6a, 0xab, 0x6d, 0xee, 0x8f, 0x09, 0xb2, 0xd9, - 0x39, 0x46, 0x0f, 0xe7, 0x51, 0x70, 0x51, 0xdb, 0x09, 0xf8, 0x8e, 0xbb, - 0x06, 0x98, 0x49, 0x69, 0xb7, 0x9e, 0xa0, 0xbc, 0x16, 0x5f, 0x96, 0xad, - 0xe9, 0x76, 0x9f, 0x71, 0xe2, 0x1b, 0x91, 0x73, 0xd9, 0x74, 0x6a, 0x70, - 0x48, 0x71, 0x47, 0x3b, 0x0c, 0xd5, 0x96, 0xe3, 0x6e, 0xdb, 0xbb, 0x9c, - 0x44, 0x5c, 0xe5, 0x07, 0x73, 0x31, 0xd1, 0x55, 0x07, 0xff, 0x5f, 0xb1, - 0x55, 0x9d, 0x0d, 0xbf, 0x32, 0x53, 0xf9, 0xfe, 0xcd, 0xc8, 0xe0, 0x56, - 0x18, 0x8f, 0x4b, 0x51, 0xd1, 0x23, 0x2e, 0x9f, 0xb9, 0xee, 0xf3, 0xfd, - 0x26, 0x02, 0xf6, 0x54, 0xd5, 0x3e, 0x13, 0xc1, 0xc1, 0xe4, 0xa8, 0xb4, - 0x5f, 0x5c, 0xa0, 0x9f, 0xb5, 0x19, 0xbb, 0x4e, 0xd6, 0xf8, 0x18, 0x9b, - 0xeb, 0x9e, 0x58, 0x9d, 0x00, 0x51, 0x24, 0x28, 0x70, 0x55, 0xf7, 0xb9, - 0x5a, 0x59, 0x50, 0xc5, 0x72, 0xab, 0x6b, 0x13, 0x95, 0xfb, 0xe4, 0xc2, - 0x05, 0x96, 0xf3, 0x48, 0xef, 0x02, 0x67, 0xd5, 0x8f, 0x5b, 0x8e, 0xb6, - 0xbe, 0xc1, 0x3d, 0x8e, 0x22, 0xee, 0x49, 0xc7, 0xbe, 0xfb, 0x2d, 0x51, - 0x45, 0x44, 0xca, 0x94, 0x8e, 0xce, 0xb5, 0x9a, 0x29, 0xc7, 0x52, 0xde, - 0x2c, 0xdf, 0xcc, 0x43, 0xc7, 0xd7, 0x51, 0xb7, 0x07, 0xf0, 0x9b, 0x9d, - 0x33, 0x98, 0x62, 0xfa, 0xc9, 0x13, 0x0b, 0xcd, 0xdf, 0xbd, 0xff, 0x8e, - 0x13, 0x44, 0xda, 0x62, 0xc0, 0xd1, 0x8d, 0x57, 0x0e, 0xec, 0x53, 0x8a, - 0x04, 0xcf, 0x0f, 0x5a, 0xd7, 0x3c, 0x4b, 0x17, 0xda, 0x3b, 0xf0, 0x30, - 0xbf, 0xea, 0x40, 0xa6, 0x36, 0xed, 0xda, 0xf7, 0x40, 0x6b, 0xf1, 0x1e, - 0x61, 0xa0, 0x8b, 0x5d, 0xfa, 0xa8, 0x6a, 0xca, 0xfd, 0x6a, 0x06, 0xb4, - 0xf5, 0xb6, 0xc7, 0xbe, 0xdf, 0xac, 0x17, 0x00, 0x4a, 0x91, 0x8d, 0x97, - 0x5b, 0xc8, 0xcb, 0xd4, 0xc8, 0x20, 0x0b, 0x53, 0xee, 0x2b, 0x25, 0xb8, - 0xa1, 0x24, 0xa1, 0xa0, 0x17, 0x60, 0xd9, 0xf7, 0x2d, 0x00, 0x6c, 0x70, - 0x44, 0x0d, 0x60, 0xe7, 0x95, 0x1e, 0x8a, 0x1b, 0x29, 0xcf, 0xb5, 0xc1, - 0xbe, 0xd0, 0xe5, 0xeb, 0xd8, 0x71, 0x88, 0x34, 0xcb, 0xbd, 0x32, 0x52, - 0xa7, 0xcf, 0x6d, 0x9b, 0xef, 0xf2, 0xe4, 0x68, 0x6f, 0xfe, 0xb9, 0x17, - 0x31, 0xa0, 0x3e, 0xfc, 0xae, 0xf6, 0x54, 0xe3, 0x33, 0x24, 0xd1, 0xfc, - 0xb7, 0x37, 0x8f, 0xd3, 0x4f, 0xf2, 0x59, 0x53, 0xea, 0xaf, 0x71, 0xc5, - 0xb1, 0xdb, 0xf9, 0xed, 0xc0, 0x46, 0x56, 0xfc, 0x09, 0x90, 0xf7, 0x09, - 0x5a, 0x12, 0x71, 0xad, 0xa6, 0x0f, 0xba, 0x4c, 0x2f, 0xd7, 0x61, 0xcb, - 0xf2, 0xab, 0x44, 0x67, 0x43, 0xd0, 0x41, 0xd5, 0xba, 0xff, 0x26, 0x50, - 0x5b, 0x97, 0x91, 0xc4, 0x8f, 0x2a, 0x64, 0x3c, 0x06, 0x2e, 0x26, 0x8e, - 0x5f, 0xb1, 0xba, 0x74, 0x16, 0xeb, 0xee, 0x6e, 0xe1, 0x68, 0xcc, 0x09, - 0xed, 0xa5, 0x5d, 0xf7, 0xef, 0xd6, 0xfa, 0x9f, 0x39, 0xe1, 0x5c, 0x38, - 
0xbd, 0x1b, 0xe6, 0x8a, 0xfa, 0xea, 0xbc, 0x14, 0x4c, 0x31, 0xa8, 0x9d, - 0x64, 0xa6, 0xec, 0xf0, 0xf8, 0xa2, 0x0a, 0x6c, 0xb9, 0xc5, 0x3d, 0x40, - 0x48, 0x41, 0x1d, 0xf2, 0xab, 0xd4, 0xdf, 0xfb, 0x55, 0x9e, 0xa5, 0xac, - 0xe9, 0xf0, 0x46, 0x96, 0xc5, 0x4d, 0x5f, 0x5f, 0x64, 0x00, 0x69, 0x48, - 0x0e, 0xa3, 0xb5, 0x5d, 0x45, 0xce, 0x57, 0xc4, 0x45, 0xdb, 0xc6, 0x13, - 0x4b, 0xa7, 0xa0, 0xd5, 0x31, 0xb4, 0xd4, 0x0f, 0x4f, 0x29, 0x40, 0xc0, - 0xaa, 0xb7, 0x54, 0x21, 0xd5, 0x3a, 0x01, 0xbc, 0xa8, 0x58, 0xb5, 0x3f, - 0xa6, 0x1a, 0x06, 0xb5, 0x07, 0xd3, 0xb6, 0xff, 0x6e, 0x74, 0x08, 0x16, - 0x45, 0xaf, 0xd9, 0xc5, 0x4a, 0x0d, 0xd2, 0x8a, 0xd1, 0x6c, 0xba, 0x5a, - 0xd0, 0xee, 0x57, 0x10, 0xa4, 0x1a, 0xf4, 0x92, 0x97, 0xe0, 0xd7, 0xa8, - 0xff, 0x47, 0xed, 0x56, 0x6b, 0x91, 0x77, 0x5d, 0xa6, 0xcf, 0xed, 0x96, - 0xc5, 0x5a, 0xe3, 0x0b, 0x1d, 0xc0, 0xcc, 0xa1, 0x71, 0x95, 0xa8, 0xec, - 0xef, 0x33, 0x91, 0xd6, 0x53, 0x1f, 0xef, 0x43, 0xa9, 0x42, 0x2a, 0xc7, - 0xf6, 0x15, 0x60, 0xc2, 0xde, 0xeb, 0xac, 0xf8, 0x55, 0x27, 0x14, 0xf1, - 0xf8, 0x69, 0x55, 0xc8, 0x69, 0x1f, 0xf3, 0xc2, 0x71, 0xe8, 0x75, 0xa9, - 0x1a, 0x91, 0xc5, 0x1e, 0xe3, 0x52, 0x24, 0x5f, 0x60, 0xb5, 0xf1, 0xe6, - 0xdd, 0x4b, 0x1b, 0xdd, 0x3a, 0xad, 0x58, 0x36, 0x9c, 0xb3, 0x25, 0x9e, - 0x28, 0xd4, 0x3b, 0x6a, 0x64, 0xe7, 0x57, 0x54, 0xad, 0x4d, 0x44, 0xfc, - 0x54, 0xd3, 0xa3, 0x96, 0x4e, 0xee, 0xde, 0x23, 0x30, 0x30, 0x1f, 0x57, - 0x2f, 0xd6, 0xb4, 0xfa, 0x5c, 0x1b, 0x4a, 0x1b, 0x96, 0x58, 0x9a, 0xc7, - 0x25, 0xd0, 0x9c, 0xf3, 0x2b, 0x16, 0x58, 0x62, 0x0c, 0x5b, 0x45, 0x96, - 0xb0, 0xc2, 0x3e, 0xca, 0x0a, 0xb5, 0x0f, 0x06, 0xa8, 0xa3, 0xb2, 0x0a, - 0x6a, 0xc5, 0xb7, 0xf8, 0x69, 0xfa, 0xc1, 0xa8, 0xbc, 0x17, 0x6c, 0x92, - 0x06, 0x50, 0x74, 0x4b, 0x02, 0xc8, 0x4d, 0x9c, 0x3e, 0x94, 0x6f, 0xef, - 0x3e, 0xd9, 0x71, 0xa6, 0x3a, 0x70, 0x6a, 0x14, 0x0e, 0x06, 0xbe, 0x40, - 0x2b, 0xa1, 0xbb, 0x05, 0x71, 0x05, 0xbd, 0xd5, 0x2d, 0xd9, 0xe2, 0xf6, - 0xb4, 0x32, 0x33, 0xac, 0x0f, 0x9a, 0xe3, 0xaf, 0xf4, 0x44, 0x21, 0x59, - 0x91, 0x0d, 0xd0, 0xf1, 0x47, 0x9e, 0x00, 0x38, 0xa2, 0x1d, 0x61, 0x54, - 0xd2, 0x18, 0x9d, 0xe4, 0x4f, 0xf3, 0xbd, 0x04, 0xdb, 0x4d, 0x59, 0x8c, - 0xfa, 0x12, 0xdd, 0xe4, 0xb5, 0x32, 0x3b, 0xf8, 0x93, 0xae, 0x3b, 0xa9, - 0xb3, 0xe9, 0x57, 0x30, 0x49, 0x6d, 0xaa, 0x35, 0x12, 0xce, 0x16, 0x98, - 0x3c, 0xd0, 0xed, 0xe8, 0xa6, 0xbc, 0xa6, 0xe6, 0x66, 0x0f, 0xb3, 0x12, - 0x95, 0x19, 0x56, 0x23, 0xb1, 0x30, 0x5d, 0xb3, 0x4c, 0x5f, 0x0c, 0xef, - 0x24, 0x12, 0xe0, 0x97, 0xf3, 0x3e, 0x9c, 0x49, 0xff, 0xa6, 0x6f, 0xa6, - 0xd2, 0x58, 0xbe, 0x3f, 0x30, 0xdd, 0x65, 0xd0, 0x40, 0xe1, 0xaf, 0x09, - 0xf1, 0xf4, 0x0f, 0x1a, 0xe5, 0xef, 0x51, 0x50, 0x38, 0x5d, 0xb0, 0x1e, - 0xed, 0x19, 0x8d, 0x4e, 0x20, 0xa1, 0x65, 0x07, 0x5b, 0x23, 0x0c, 0x14, - 0xd3, 0x18, 0xa3, 0xda, 0x58, 0x9f, 0x10, 0x00, 0xbd, 0xb5, 0x95, 0x07, - 0x1d, 0x0f, 0xf9, 0x2a, 0xe4, 0x35, 0x3c, 0x60, 0xad, 0xb2, 0x13, 0x3b, - 0xd5, 0x9e, 0xeb, 0xc7, 0x09, 0x6e, 0x53, 0xff, 0x95, 0xf3, 0xc1, 0x9b, - 0xcd, 0x21, 0x15, 0x3b, 0x5f, 0xfe, 0x4e, 0xaf, 0x3f, 0xf8, 0xe3, 0xa8, - 0x35, 0xee, 0x44, 0x33, 0xc7, 0x8c, 0x9c, 0x1c, 0x33, 0x55, 0x3c, 0x4a, - 0xa4, 0x35, 0xf6, 0xf0, 0x32, 0x8e, 0xed, 0x6d, 0x06, 0xff, 0x8d, 0x24, - 0x05, 0x72, 0x4c, 0xa2, 0x97, 0x25, 0x93, 0x3d, 0x79, 0x18, 0x22, 0x15, - 0xec, 0x5c, 0xc4, 0x10, 0x65, 0xec, 0x90, 0x6d, 0x28, 0xba, 0x93, 0xb5, - 0x2f, 0x53, 0xe4, 0x00, 0x9c, 0x39, 0xf5, 0x4c, 0xde, 0x51, 0x39, 0xc3, - 0xd8, 0x03, 0xc3, 0x97, 0xe1, 0xa8, 0x3e, 0x06, 0x26, 0x4d, 0xd9, 0x49, - 0x75, 0xbb, 0xd5, 0x69, 0x20, 0xfb, 0x85, 0x12, 0xc9, 0xac, 0xfc, 0x05, - 
0xad, 0x57, 0xa9, 0x58, 0xcd, 0xfd, 0xbe, 0x64, 0x31, 0x50, 0x4d, 0xa4, - 0x93, 0xb6, 0x23, 0x3b, 0xfd, 0xd9, 0xdb, 0x46, 0xdd, 0x1f, 0x07, 0x54, - 0xc2, 0xc2, 0xd6, 0xad, 0xf6, 0x21, 0x39, 0xa1, 0x96, 0x53, 0x12, 0x46, - 0x5a, 0xc8, 0xf3, 0xf8, 0xe2, 0xa3, 0xd0, 0x29, 0x3f, 0x30, 0xca, 0x0b, - 0x57, 0xab, 0xcf, 0x1e, 0x08, 0x59, 0x3d, 0x41, 0x6a, 0xf7, 0xb2, 0xfc, - 0xff, 0x33, 0x46, 0xd1, 0x1a, 0xa6, 0x91, 0x54, 0xca, 0x27, 0x5a, 0x94, - 0x13, 0xf4, 0xf0, 0xcf, 0x58, 0xe0, 0x96, 0x50, 0xda, 0xe6, 0x91, 0xc7, - 0x8d, 0x14, 0x5b, 0xc1, 0xeb, 0x4a, 0x96, 0xf1, 0xa5, 0x43, 0xf6, 0x29, - 0x91, 0xb9, 0xb9, 0x67, 0x3f, 0x31, 0xd7, 0x08, 0xe6, 0x2b, 0xfb, 0x43, - 0x56, 0x39, 0x4e, 0xf9, 0x02, 0x8e, 0x96, 0x1f, 0xa3, 0x3c, 0xae, 0x55, - 0x03, 0x05, 0x9a, 0x39, 0xbe, 0xf7, 0x67, 0xa1, 0x6b, 0x2f, 0x42, 0x45, - 0x9b, 0x45, 0x8f, 0x53, 0x1f, 0x96, 0x42, 0x54, 0xd2, 0x5b, 0xf0, 0x17, - 0x94, 0x41, 0xaf, 0xd4, 0xc6, 0x37, 0x5f, 0xc0, 0xbd, 0xe3, 0x44, 0x8d, - 0xc1, 0x69, 0x64, 0x2a, 0xe7, 0x08, 0xe5, 0x18, 0x92, 0x53, 0xfc, 0xed, - 0xd3, 0x69, 0x94, 0x6b, 0x10, 0x0b, 0x5e, 0x91, 0x38, 0x4b, 0xa5, 0x19, - 0x3a, 0x6a, 0x2e, 0x5a, 0xa2, 0x6f, 0x34, 0x2c, 0x7b, 0x5d, 0x53, 0x33, - 0x77, 0x46, 0xf8, 0x4a, 0xa2, 0x8d, 0x55, 0x67, 0xa8, 0xbd, 0xc6, 0x3c, - 0x5d, 0x47, 0xeb, 0x99, 0xed, 0xdc, 0xae, 0xcf, 0xec, 0xbe, 0x40, 0x60, - 0xfc, 0x36, 0x5c, 0x93, 0x95, 0x64, 0xd8, 0x47, 0x14, 0xe2, 0x1e, 0xa2, - 0xd4, 0xd4, 0xdf, 0xd9, 0x23, 0x18, 0xf2, 0x99, 0xe8, 0xe4, 0x2a, 0x3b, - 0xec, 0x2e, 0x28, 0xa8, 0x04, 0x74, 0x04, 0xa4, 0x32, 0xa6, 0x49, 0xf9, - 0x33, 0x6c, 0xa8, 0x1d, 0xb2, 0xbb, 0x57, 0xe4, 0xcf, 0xf2, 0x9e, 0x74, - 0x8d, 0xf7, 0x22, 0xaa, 0x0d, 0x8a, 0x2f, 0x34, 0x72, 0x33, 0xec, 0xdf, - 0x46, 0x57, 0x6c, 0x97, 0x94, 0xad, 0x06, 0x88, 0xeb, 0x20, 0xec, 0x79, - 0x44, 0xe1, 0xbc, 0xf8, 0xbd, 0xeb, 0x99, 0xe3, 0xaf, 0xfe, 0xc5, 0xb5, - 0xfa, 0x31, 0x75, 0x62, 0xff, 0x2a, 0x2a, 0x1b, 0xce, 0xad, 0xa8, 0xc8, - 0x3c, 0x54, 0x23, 0xf9, 0x9e, 0x2d, 0xe2, 0xa4, 0x4f, 0x5b, 0x4d, 0xb8, - 0x4f, 0xc6, 0xb3, 0xc6, 0xef, 0x66, 0x54, 0x31, 0xab, 0xd3, 0xf0, 0xb9, - 0xfa, 0xb6, 0x15, 0xe6, 0xdb, 0x4b, 0x51, 0x4d, 0x77, 0xa5, 0x3d, 0x4e, - 0xd9, 0xc9, 0xdb, 0x95, 0x31, 0x1d, 0x4d, 0x37, 0xe0, 0x34, 0xd3, 0xf3, - 0x20, 0x6b, 0xb8, 0x16, 0x0b, 0x4e, 0x55, 0x96, 0x56, 0x1e, 0xa7, 0xe8, - 0xc6, 0x3a, 0x08, 0x49, 0xa1, 0x16, 0x46, 0xc9, 0x43, 0xcb, 0x8f, 0x28, - 0x4a, 0x78, 0xaa, 0xf9, 0x6c, 0x74, 0xc8, 0x0b, 0xce, 0x13, 0x2c, 0xef, - 0xfe, 0x73, 0x42, 0xa7, 0xbc, 0x3d, 0xc9, 0xf2, 0xaf, 0x1c, 0x32, 0xdb, - 0xb2, 0x15, 0x70, 0x6b, 0x9b, 0x6e, 0x6f, 0x6e, 0xf7, 0x95, 0xea, 0x3e, - 0xd0, 0xb1, 0x2a, 0xbe, 0x8c, 0x66, 0x4e, 0xe9, 0x29, 0xe3, 0x35, 0xde, - 0xbf, 0x44, 0xbc, 0x5e, 0x56, 0x8b, 0xb3, 0xd4, 0xdf, 0xf5, 0x4e, 0x2e, - 0xeb, 0xe6, 0x8e, 0x58, 0xe2, 0xfd, 0xe7, 0x27, 0xff, 0x07, 0x49, 0x20, - 0xdd, 0xcf, 0xe4, 0xd7, 0x5c, 0x5f, 0x1f, 0xcc, 0xeb, 0x29, 0xeb, 0x34, - 0xac, 0xd6, 0xb6, 0xf8, 0xae, 0xdf, 0x11, 0x58, 0xd5, 0xea, 0xf1, 0x76, - 0xe5, 0x4d, 0x51, 0x72, 0xd4, 0x5e, 0x1e, 0x0f, 0xfd, 0x2e, 0xbe, 0x8e, - 0x07, 0x1a, 0x1f, 0x99, 0x4d, 0x73, 0x70, 0xe1, 0x41, 0xb4, 0x20, 0x10, - 0x75, 0x0f, 0xc8, 0x69, 0x5f, 0x6c, 0x20, 0x2b, 0xc8, 0xfd, 0xe9, 0x4c, - 0xf4, 0x6f, 0x6a, 0xe0, 0x1a, 0xb5, 0xec, 0x2e, 0xf5, 0x25, 0x6d, 0x56, - 0x56, 0xb9, 0x42, 0xca, 0x70, 0x72, 0xe5, 0x41, 0x07, 0x4f, 0x41, 0x25, - 0xea, 0x0a, 0x5d, 0xe1, 0x0a, 0xd5, 0x6f, 0x35, 0x50, 0xcc, 0x27, 0x53, - 0x5f, 0x31, 0x1c, 0xee, 0xae, 0x26, 0xc8, 0xc4, 0x4f, 0x9b, 0xf5, 0xf6, - 0x4d, 0x19, 0xb9, 0xc4, 0x55, 0xcd, 0xe5, 0x8a, 0xe9, 0x45, 0xec, 0xf2, - 
0xf9, 0x33, 0x4d, 0xba, 0x57, 0x8f, 0xd6, 0xf5, 0xf7, 0x92, 0xb3, 0xd3, - 0x65, 0x39, 0x07, 0x04, 0x92, 0x2f, 0x70, 0x99, 0x97, 0x96, 0x60, 0xe5, - 0x92, 0x60, 0xc3, 0x72, 0x1e, 0xc7, 0xe6, 0x1d, 0xbb, 0x5b, 0xd5, 0x64, - 0x1b, 0x36, 0x45, 0xb8, 0xcb, 0x42, 0xe7, 0x26, 0x45, 0x65, 0xc8, 0x04, - 0x1c, 0x05, 0x9b, 0x48, 0xe3, 0x93, 0x8e, 0xb2, 0x1c, 0x6a, 0xab, 0x60, - 0xc2, 0xa6, 0x1a, 0x71, 0xd5, 0x2c, 0xb8, 0xe9, 0x9e, 0x66, 0x8d, 0xb6, - 0xb1, 0x99, 0x90, 0x9c, 0x1b, 0xc9, 0x44, 0x6d, 0x31, 0xbb, 0x62, 0x6e, - 0x46, 0xcc, 0xd7, 0x47, 0x3a, 0x40, 0x63, 0x33, 0x34, 0x4f, 0x50, 0x3c, - 0x94, 0x97, 0xe9, 0xe8, 0x3a, 0xf7, 0x2d, 0x2d, 0x9c, 0xb6, 0x5d, 0x52, - 0xbd, 0xa9, 0x2d, 0x42, 0xfc, 0xe8, 0x70, 0x09, 0x48, 0xd0, 0x36, 0x0b, - 0x3d, 0x2b, 0x9f, 0xe2, 0x4c, 0xdf, 0xf3, 0x57, 0x73, 0x55, 0xf7, 0x34, - 0xb8, 0x6b, 0x44, 0x6f, 0xf6, 0x6d, 0xcf, 0x93, 0x09, 0x14, 0xac, 0x8f, - 0xde, 0xce, 0x5f, 0x05, 0x04, 0x9f, 0xc7, 0x05, 0x5f, 0xdd, 0x2e, 0xfc, - 0x53, 0xec, 0x9e, 0xdb, 0xa8, 0xa2, 0xc7, 0x53, 0x5c, 0x9a, 0x4d, 0xb6, - 0x6f, 0xa5, 0xc6, 0xf3, 0xc5, 0xa4, 0x56, 0x62, 0xdc, 0x75, 0xe4, 0x0b, - 0xb0, 0xcc, 0x38, 0xde, 0x2d, 0xbb, 0xbc, 0x0b, 0xc6, 0xab, 0xac, 0xac, - 0x46, 0xce, 0x1e, 0xe6, 0x47, 0x6c, 0x6e, 0x8e, 0x00, 0x00, 0xa0, 0xae, - 0x1e, 0x1d, 0xaa, 0x22, 0xaf, 0x34, 0xc7, 0x26, 0x37, 0x01, 0x46, 0x25, - 0x9c, 0x5f, 0x92, 0xef, 0xda, 0x07, 0x64, 0x62, 0xe4, 0xf7, 0x4c, 0xa2, - 0x41, 0xf1, 0x10, 0xe0, 0xe5, 0x73, 0x72, 0xe1, 0xf8, 0x66, 0x19, 0x58, - 0xa9, 0xdf, 0xb1, 0x41, 0xcb, 0xb3, 0xc4, 0xe6, 0x21, 0xbe, 0x17, 0x26, - 0xa9, 0x68, 0x96, 0xde, 0x5d, 0xba, 0x8f, 0x1b, 0x09, 0x00, 0x39, 0x0e, - 0xc2, 0x8d, 0x31, 0x61, 0xfe, 0x9e, 0x60, 0x05, 0xf3, 0x72, 0xdf, 0x78, - 0x14, 0x5a, 0x1b, 0x74, 0xa1, 0x23, 0xa7, 0x6e, 0x93, 0x76, 0xfa, 0x4a, - 0x73, 0xa1, 0x3b, 0xda, 0x0b, 0x06, 0xdd, 0xfc, 0x2f, 0xef, 0x0a, 0x38, - 0x03, 0xbf, 0xbb, 0x12, 0x29, 0x6b, 0xec, 0x68, 0xc7, 0xa6, 0xf9, 0x72, - 0xbc, 0xdb, 0xeb, 0x4e, 0x8f, 0x5f, 0x3a, 0xa9, 0x06, 0x4e, 0x3c, 0xf4, - 0x3b, 0xe0, 0x98, 0x9b, 0x77, 0x57, 0x0f, 0x39, 0x08, 0x43, 0x3f, 0x9b, - 0x76, 0x11, 0xd3, 0x38, 0xb6, 0x1f, 0x1e, 0xfe, 0xbb, 0x16, 0x37, 0x24, - 0x15, 0xf7, 0x8e, 0x61, 0x3d, 0xf5, 0x60, 0xab, 0x46, 0x49, 0xd6, 0xb2, - 0x8e, 0x35, 0xd5, 0x66, 0x20, 0x1f, 0xad, 0xf5, 0x95, 0xc3, 0x3e, 0xaa, - 0xda, 0x12, 0x1f, 0x33, 0xf4, 0xc0, 0xd9, 0x9e, 0x09, 0x76, 0x8b, 0x2f, - 0x35, 0xe2, 0x58, 0x09, 0x36, 0xf1, 0x03, 0xbc, 0xc2, 0x54, 0x67, 0x29, - 0x00, 0x3b, 0xf0, 0x24, 0xdf, 0xa0, 0x92, 0x71, 0xc3, 0x98, 0xe8, 0x5d, - 0xbe, 0xc7, 0xe8, 0x6f, 0x2f, 0x05, 0x89, 0x9f, 0xa1, 0x63, 0x29, 0x12, - 0x94, 0xff, 0xc7, 0x4c, 0xec, 0x98, 0x0e, 0xb8, 0xeb, 0x9e, 0x6d, 0x1e, - 0x4f, 0x4a, 0x1e, 0x41, 0xb0, 0xf9, 0x40, 0x8b, 0xdd, 0xd9, 0xa6, 0x1b, - 0xd4, 0x6d, 0xaf, 0x5b, 0x14, 0x68, 0xfd, 0x96, 0x5d, 0x0d, 0xad, 0x46, - 0x03, 0xf8, 0xd7, 0x13, 0x1d, 0xf3, 0x47, 0xbe, 0x46, 0x3d, 0xc7, 0xdd, - 0xa9, 0x60, 0x05, 0x15, 0xef, 0x9d, 0xa4, 0xb8, 0xde, 0xf2, 0x41, 0xe2, - 0x07, 0x1d, 0xcb, 0xe8, 0xf3, 0x9c, 0x9c, 0x5e, 0xcd, 0xec, 0x53, 0x39, - 0xf2, 0x62, 0x3b, 0x69, 0x3a, 0x29, 0xc7, 0xb3, 0x57, 0xce, 0x58, 0xd6, - 0x55, 0xf8, 0xc2, 0xf1, 0x16, 0xf3, 0x33, 0x3f, 0xf2, 0xaa, 0x63, 0x42, - 0x27, 0x01, 0x22, 0x5a, 0x1e, 0x8d, 0xa5, 0x33, 0x34, 0x29, 0x12, 0xf6, - 0x07, 0x22, 0xfd, 0xbb, 0x72, 0x60, 0x2a, 0xf5, 0xec, 0x71, 0xfe, 0xd7, - 0xc1, 0xf5, 0xdf, 0x97, 0x3e, 0x4a, 0x9a, 0x97, 0x6f, 0x56, 0xf1, 0xd4, - 0xba, 0x29, 0x09, 0x46, 0x3f, 0x10, 0xdc, 0x2d, 0xb2, 0x04, 0x32, 0x38, - 0xa3, 0xc7, 0x75, 0x95, 0x16, 0xd6, 0x12, 0x44, 0x7a, 0xd3, 0x18, 0xb3, - 
0x51, 0x72, 0x63, 0xb8, 0xae, 0x9b, 0xf1, 0xec, 0x17, 0xe4, 0x2d, 0xed, - 0x29, 0x05, 0x63, 0xd7, 0x01, 0xf4, 0xf5, 0xc1, 0x6d, 0x13, 0x5f, 0x5c, - 0x73, 0x11, 0xc9, 0x53, 0xf4, 0xda, 0x90, 0xa2, 0x1c, 0x0b, 0x1d, 0x37, - 0x28, 0xa1, 0x06, 0x65, 0xd3, 0x49, 0x5d, 0x07, 0x1f, 0x93, 0xa9, 0x98, - 0xc5, 0xa5, 0x13, 0xc5, 0xac, 0xda, 0x64, 0x25, 0x77, 0x9a, 0xd5, 0xa9, - 0xe9, 0x3a, 0x77, 0x62, 0xac, 0xf2, 0x76, 0xf4, 0x03, 0xb6, 0x03, 0x6e, - 0xef, 0x97, 0x13, 0x1c, 0xd1, 0xb9, 0x73, 0x12, 0xf7, 0x10, 0xbd, 0x1c, - 0xa1, 0xe7, 0xed, 0xd7, 0xa0, 0xd7, 0x53, 0xa1, 0x21, 0xf1, 0x5f, 0x1e, - 0xec, 0x36, 0x0d, 0x2c, 0xce, 0x74, 0x4a, 0x0c, 0x97, 0x5a, 0x76, 0x62, - 0x18, 0x9c, 0xc3, 0xc1, 0xc4, 0x5e, 0xf1, 0xfa, 0xe6, 0x4b, 0x15, 0xda, - 0xfa, 0xfd, 0xe9, 0x98, 0x09, 0xc3, 0x67, 0x63, 0x1f, 0x28, 0x37, 0xf0, - 0x59, 0x4b, 0x4b, 0xa3, 0xd1, 0x41, 0x94, 0xa6, 0x05, 0xb0, 0x93, 0xee, - 0x41, 0xa4, 0xce, 0xee, 0xea, 0xc4, 0x43, 0x6e, 0xab, 0x65, 0x70, 0xe3, - 0x4d, 0xf1, 0x02, 0xf5, 0x0f, 0xd5, 0x5e, 0xfd, 0x03, 0xcd, 0x22, 0x27, - 0x90, 0xf4, 0x98, 0xa2, 0xc0, 0xb4, 0xd5, 0x04, 0xfa, 0x75, 0x22, 0x4c, - 0xe7, 0xdd, 0xef, 0x3a, 0x1d, 0xb6, 0x00, 0x58, 0xcd, 0x5a, 0xbc, 0x12, - 0xea, 0x5a, 0xda, 0xa9, 0x18, 0x0e, 0xff, 0x51, 0xc4, 0xaf, 0xc8, 0x95, - 0xfb, 0x92, 0xdf, 0x99, 0xc9, 0x4e, 0xfe, 0xb1, 0xb0, 0xca, 0xa1, 0xba, - 0x90, 0xc8, 0x07, 0x34, 0x52, 0x6d, 0xd8, 0x05, 0x72, 0x2e, 0xee, 0x98, - 0xc0, 0x1e, 0x25, 0xb3, 0xa2, 0xb4, 0x9c, 0xa5, 0xdc, 0xd3, 0xb1, 0xdf, - 0x17, 0xd9, 0xda, 0xe9, 0x5d, 0x41, 0xca, 0xc7, 0xe4, 0x94, 0x0d, 0x67, - 0xba, 0x9c, 0xcf, 0x52, 0xf0, 0x00, 0x54, 0xe0, 0xbd, 0x3c, 0xc7, 0xb9, - 0x6a, 0x11, 0xc6, 0xd1, 0x62, 0xc3, 0xcf, 0xc2, 0x6a, 0x44, 0xeb, 0x41, - 0x43, 0x54, 0xe2, 0xf5, 0xc4, 0x11, 0xd7, 0x6a, 0xf2, 0x76, 0xa9, 0x16, - 0xae, 0xe2, 0x11, 0xfb, 0x04, 0x3d, 0xee, 0xd1, 0x98, 0x30, 0x0b, 0x6b, - 0x8a, 0x6f, 0x45, 0xb7, 0x01, 0x64, 0x46, 0x32, 0x61, 0xd5, 0x05, 0xfa, - 0xb1, 0x14, 0x54, 0x39, 0x13, 0x9b, 0xd5, 0x1d, 0x5c, 0xad, 0xd0, 0x5e, - 0x6d, 0xb3, 0xa1, 0xb3, 0xc5, 0x8d, 0xf8, 0x12, 0xd9, 0x5f, 0x94, 0x27, - 0xdf, 0x30, 0xc8, 0x0e, 0x3a, 0x46, 0x70, 0x5c, 0x4c, 0xaa, 0x24, 0xc3, - 0x50, 0x62, 0x52, 0xc8, 0x63, 0x64, 0xc9, 0x49, 0x74, 0x1c, 0xd2, 0x49, - 0x0f, 0x20, 0x69, 0x53, 0x97, 0x34, 0xc0, 0x92, 0x48, 0x28, 0x7b, 0x64, - 0xca, 0xea, 0x07, 0x6c, 0x63, 0x3e, 0xb6, 0xdb, 0xd5, 0x52, 0x9d, 0x7a, - 0x5f, 0x46, 0xc1, 0xb9, 0x3e, 0xe2, 0xe9, 0xeb, 0x04, 0x65, 0xc0, 0x74, - 0x4b, 0x07, 0x6a, 0x19, 0x4a, 0x9d, 0x05, 0xa0, 0xba, 0xae, 0x74, 0xef, - 0x62, 0x09, 0x57, 0x36, 0xe5, 0x9c, 0x54, 0x59, 0x3d, 0x04, 0xf0, 0xfb, - 0x6f, 0x89, 0x13, 0x1f, 0x1f, 0x88, 0x03, 0x6b, 0x0c, 0xeb, 0x53, 0xac, - 0x3a, 0x18, 0xa4, 0x93, 0xcc, 0x4f, 0xf5, 0x92, 0x44, 0x23, 0x9e, 0x67, - 0xf0, 0xf5, 0x2f, 0xb9, 0xc9, 0x34, 0x76, 0x97, 0x1d, 0x94, 0x75, 0x3f, - 0x47, 0x97, 0xe0, 0x30, 0xcc, 0xff, 0xd2, 0x7a, 0x3b, 0x04, 0xa7, 0xa5, - 0x62, 0x9e, 0xe4, 0x8f, 0xd8, 0x62, 0xee, 0x1d, 0x1c, 0xff, 0xad, 0x18, - 0xc9, 0x66, 0x47, 0x36, 0xfb, 0x2e, 0x74, 0x2a, 0xe7, 0x5f, 0xb2, 0x12, - 0xd2, 0x9e, 0xae, 0x2b, 0x92, 0xb8, 0x53, 0x66, 0x22, 0x5c, 0xa8, 0xaf, - 0x4f, 0x29, 0xab, 0x64, 0x50, 0x09, 0xe9, 0x2f, 0x2e, 0x62, 0x2e, 0x0e, - 0x8a, 0xd6, 0xeb, 0xa7, 0x5d, 0x3e, 0x9e, 0xe1, 0x39, 0x52, 0x13, 0x57, - 0x54, 0x5c, 0x78, 0xed, 0xb3, 0xfc, 0x5f, 0xa1, 0xf3, 0x2a, 0x77, 0x90, - 0xa9, 0x09, 0xa1, 0x05, 0x3b, 0xa9, 0x6a, 0xf5, 0xc4, 0xfa, 0x97, 0x79, - 0x64, 0x57, 0x1a, 0xf1, 0x74, 0xe5, 0x16, 0x93, 0xa9, 0xef, 0xe6, 0xdf, - 0x36, 0xd2, 0xd0, 0xe6, 0xb8, 0xdd, 0xe9, 0x13, 0x4c, 0xcd, 0x22, 0x98, - 
0xc1, 0x94, 0xbb, 0x04, 0x2a, 0x4a, 0x69, 0x10, 0x5a, 0xcb, 0x1d, 0x9e, - 0xc4, 0x3d, 0x6d, 0x0e, 0xe0, 0x12, 0xb4, 0xe1, 0x6c, 0x55, 0x6f, 0xa3, - 0xf5, 0x1b, 0x0c, 0xe5, 0x1c, 0x99, 0x8b, 0x23, 0x23, 0xbc, 0x33, 0xe4, - 0xd4, 0x15, 0xfd, 0xcc, 0x90, 0x87, 0xb5, 0x0e, 0x24, 0xba, 0x20, 0x1b, - 0xcf, 0x67, 0x98, 0x1a, 0x35, 0xe7, 0xc3, 0x95, 0x29, 0xd6, 0xd2, 0x4f, - 0xe4, 0x14, 0xd5, 0xa1, 0x93, 0xff, 0x24, 0x0e, 0xfc, 0xb7, 0xd6, 0xde, - 0x05, 0xc5, 0x2f, 0xaa, 0x92, 0xd4, 0xd8, 0xac, 0x8f, 0x67, 0x45, 0xdb, - 0x36, 0x19, 0x15, 0x09, 0x9a, 0x3f, 0x2a, 0x56, 0xd5, 0xa9, 0x26, 0xb6, - 0xcb, 0x19, 0xf3, 0x6a, 0xbb, 0xba, 0xba, 0xa3, 0x68, 0x90, 0x0f, 0xb1, - 0x98, 0x14, 0x33, 0xd8, 0x12, 0xdf, 0xef, 0xe5, 0x01, 0x93, 0xab, 0xf8, - 0x93, 0x40, 0xbd, 0xa0, 0x01, 0x34, 0x54, 0xfd, 0xa0, 0xc4, 0xc3, 0xf3, - 0x6b, 0x90, 0x30, 0xc1, 0xbe, 0xd8, 0xbb, 0xab, 0x71, 0xaa, 0xe5, 0x3b, - 0x2d, 0x5d, 0x6e, 0x00, 0x34, 0xa8, 0x02, 0x34, 0xa9, 0x67, 0x95, 0xcd, - 0xed, 0xa2, 0x25, 0x55, 0xc9, 0x03, 0x1c, 0x30, 0xe7, 0xdf, 0xe6, 0xe7, - 0x2b, 0x5a, 0x9a, 0xcd, 0xa8, 0xf0, 0x4e, 0xe4, 0xd7, 0x90, 0x5f, 0x4e, - 0xbf, 0x5d, 0x68, 0x12, 0x1c, 0x4c, 0x68, 0x03, 0x9c, 0x49, 0xcb, 0xe6, - 0xc4, 0xfd, 0xad, 0xd5, 0xa8, 0xd8, 0xda, 0x2f, 0x13, 0xbc, 0x42, 0x61, - 0xa5, 0x0a, 0x1a, 0xe9, 0x5e, 0x5c, 0x01, 0x7c, 0xca, 0x73, 0x6f, 0x32, - 0xc1, 0x96, 0x24, 0x9d, 0x12, 0x20, 0x11, 0x6a, 0xf6, 0xbc, 0xff, 0x6a, - 0xc1, 0x58, 0x0d, 0xb9, 0xad, 0xc5, 0xde, 0x69, 0x37, 0xbe, 0xd9, 0x93, - 0xcc, 0x2b, 0xe9, 0x13, 0x45, 0xa0, 0x6c, 0x3f, 0x44, 0x34, 0xaf, 0x43, - 0x6d, 0xae, 0xef, 0xb2, 0x65, 0x03, 0xc1, 0xef, 0x10, 0x1e, 0xd8, 0x6e, - 0xb5, 0xb9, 0x03, 0xd8, 0x6e, 0x2f, 0x53, 0xe6, 0xc0, 0xaf, 0x44, 0xd2, - 0xd8, 0x15, 0x56, 0x15, 0x59, 0xd6, 0xd4, 0xe4, 0x1a, 0x25, 0xd5, 0xcf, - 0xe7, 0x6a, 0x55, 0xd4, 0xf8, 0x42, 0x4c, 0xcb, 0x9a, 0x48, 0x4d, 0x27, - 0x61, 0x4c, 0x36, 0x2b, 0xcb, 0x10, 0xba, 0xf7, 0xe3, 0x23, 0x27, 0xc5, - 0x6a, 0x1b, 0x94, 0x69, 0x64, 0xb1, 0x8c, 0xdb, 0xd4, 0x0d, 0x32, 0x3e, - 0x58, 0x73, 0xa8, 0x2f, 0x3d, 0x22, 0xd9, 0x0d, 0x2a, 0x52, 0xf0, 0xdd, - 0xeb, 0x21, 0x42, 0xc7, 0x59, 0x96, 0x09, 0x93, 0x5a, 0x70, 0xc3, 0x21, - 0x5f, 0xce, 0xc2, 0xdd, 0xcf, 0x61, 0xed, 0x1c, 0xfb, 0x2f, 0x57, 0xf7, - 0x31, 0xb8, 0x3e, 0x92, 0x29, 0xd4, 0x47, 0x6a, 0x19, 0x66, 0x00, 0xc2, - 0xc4, 0x6c, 0xb5, 0xc5, 0x68, 0x24, 0xa8, 0x64, 0x26, 0x72, 0x43, 0x20, - 0x9f, 0xf1, 0x3f, 0xac, 0x64, 0xb5, 0x12, 0x26, 0x13, 0x76, 0x52, 0x05, - 0xda, 0x57, 0xe3, 0x53, 0x73, 0x30, 0x21, 0x27, 0x75, 0x8d, 0x37, 0xd1, - 0x77, 0x40, 0x97, 0x2a, 0xb7, 0x0b, 0x2e, 0x9e, 0x4c, 0x36, 0x75, 0x44, - 0x15, 0xdb, 0x96, 0x70, 0xf9, 0x33, 0x9a, 0x1e, 0x6e, 0x13, 0x05, 0x38, - 0x2c, 0xbf, 0x0a, 0xdd, 0x2b, 0x2b, 0x38, 0x77, 0xa9, 0x00, 0x2d, 0x5e, - 0xee, 0x4b, 0xf3, 0x20, 0x7a, 0x90, 0x97, 0x44, 0xdf, 0x55, 0xfd, 0x50, - 0xe3, 0x24, 0x25, 0xa9, 0xd9, 0x3f, 0x6d, 0x09, 0x32, 0x67, 0xb5, 0x43, - 0xf1, 0xc7, 0xa7, 0xfb, 0x92, 0xde, 0xc3, 0xbf, 0x64, 0x6b, 0x35, 0xda, - 0x08, 0x94, 0x68, 0xb0, 0xc8, 0x3f, 0xb5, 0x9f, 0x15, 0x05, 0xff, 0x6c, - 0xbc, 0x22, 0x61, 0xf4, 0x67, 0xf8, 0x1f, 0x2e, 0x91, 0xc8, 0x12, 0xdc, - 0xcb, 0x22, 0x05, 0xb8, 0xab, 0x0d, 0x0e, 0xd7, 0x04, 0x8e, 0x32, 0x0e, - 0xfe, 0x72, 0x79, 0xc3, 0xba, 0xd8, 0x68, 0x3e, 0x5d, 0xab, 0xa0, 0xf8, - 0x26, 0x57, 0xe4, 0x20, 0x91, 0x0a, 0xde, 0x52, 0x95, 0xbc, 0xb7, 0x71, - 0x50, 0xe4, 0x3f, 0x07, 0x4c, 0xa8, 0x6a, 0xb6, 0xa0, 0x95, 0xe2, 0x31, - 0x8f, 0x5f, 0xfa, 0xdd, 0xee, 0x02, 0x23, 0x56, 0xf1, 0xdd, 0x1a, 0xa6, - 0xa0, 0x2d, 0x46, 0x36, 0x6c, 0x79, 0xe8, 0x67, 0x43, 0xdd, 0xe7, 0x2e, - 
0x25, 0xda, 0x35, 0x6f, 0x63, 0xf1, 0x2c, 0x6c, 0x61, 0xaa, 0xb7, 0x51, - 0x91, 0xa1, 0x7c, 0x54, 0x9a, 0xf6, 0x3c, 0x3f, 0xa8, 0xba, 0x4d, 0xee, - 0xb6, 0xab, 0xa5, 0x05, 0xc6, 0xb6, 0xe8, 0x2f, 0x1b, 0x99, 0xb0, 0x45, - 0x3e, 0xc3, 0x50, 0x26, 0x0b, 0x10, 0x61, 0x5a, 0xc6, 0x25, 0x2d, 0x07, - 0xb6, 0x28, 0x59, 0xf3, 0xb4, 0x02, 0x61, 0xa0, 0xd0, 0x0a, 0xae, 0xd6, - 0x3c, 0xcc, 0x5f, 0xfb, 0xc0, 0xfd, 0xeb, 0x7b, 0xe2, 0x66, 0xc5, 0x98, - 0x70, 0x50, 0x31, 0x3a, 0x12, 0x45, 0xf4, 0x1c, 0xba, 0xa6, 0x92, 0x51, - 0xae, 0x68, 0xec, 0xb0, 0x1a, 0xd9, 0x45, 0x00, 0xd6, 0x9e, 0xad, 0x64, - 0xfe, 0xd9, 0xfb, 0xcc, 0x57, 0xff, 0x9e, 0xa3, 0x71, 0xe7, 0x7a, 0xaf, - 0x26, 0x31, 0x31, 0x6a, 0x41, 0xa4, 0x4d, 0x68, 0xbc, 0xcb, 0xfa, 0xb4, - 0x3a, 0x1c, 0x3a, 0x8f, 0xcd, 0xc1, 0x95, 0xb2, 0x46, 0x72, 0xf7, 0xfc, - 0x20, 0xe2, 0x2f, 0x0f, 0xbd, 0x74, 0xe1, 0x2a, 0xd5, 0xf6, 0xe9, 0xe1, - 0x45, 0x7d, 0x95, 0xb0, 0x49, 0xce, 0xe8, 0x53, 0x69, 0x46, 0x9d, 0x03, - 0x5f, 0x15, 0x2e, 0x92, 0x4c, 0xb7, 0xf1, 0x43, 0x67, 0x8a, 0x43, 0xc6, - 0x90, 0xec, 0xb5, 0x5d, 0xd5, 0x64, 0x16, 0x6e, 0xf0, 0xad, 0x4e, 0xf0, - 0x56, 0xe8, 0x77, 0xd5, 0x47, 0x47, 0x41, 0xc9, 0x98, 0x3a, 0xcb, 0xe0, - 0x01, 0x77, 0x93, 0x15, 0xe0, 0xd3, 0x93, 0xbe, 0xe1, 0x97, 0xe0, 0x21, - 0x60, 0x2b, 0xf1, 0x4a, 0x62, 0x29, 0x11, 0xe9, 0x61, 0x55, 0xc4, 0x57, - 0x04, 0xa8, 0xb3, 0xb3, 0x61, 0xd7, 0xa6, 0xce, 0x50, 0xd2, 0xc3, 0x38, - 0xda, 0xc2, 0x23, 0x67, 0x37, 0x09, 0xa7, 0xfd, 0x29, 0xdc, 0xcc, 0x52, - 0x65, 0xea, 0x3f, 0xcc, 0x67, 0x5e, 0x3b, 0xd4, 0x59, 0x59, 0x12, 0x9b, - 0xf1, 0xd2, 0x43, 0x46, 0x54, 0xcd, 0xb9, 0xbe, 0x71, 0xb6, 0x6d, 0x6a, - 0x62, 0xc5, 0x59, 0xc1, 0x21, 0xf7, 0x4c, 0x91, 0x64, 0xe0, 0xd7, 0xd9, - 0x34, 0x60, 0x0d, 0xb2, 0x93, 0xd8, 0xd3, 0x01, 0x8b, 0xf3, 0x9c, 0x6c, - 0xff, 0x63, 0xca, 0xd2, 0xf4, 0x76, 0xe3, 0x60, 0x52, 0x5c, 0x0e, 0xa3, - 0x13, 0xc8, 0xd9, 0xa7, 0x13, 0x6d, 0x1b, 0x29, 0xc0, 0xb1, 0x54, 0x31, - 0x33, 0x55, 0x44, 0x0a, 0x0a, 0x96, 0x3f, 0xf0, 0xb2, 0x64, 0x23, 0xa1, - 0xc8, 0x08, 0x01, 0x94, 0x2f, 0xc8, 0x0a, 0xfb, 0x93, 0x38, 0xe4, 0xc1, - 0xd9, 0xea, 0x46, 0x96, 0xdd, 0x5d, 0x62, 0xfc, 0xb0, 0x4d, 0x17, 0xe8, - 0xa0, 0xd4, 0x35, 0x98, 0x65, 0xb0, 0x27, 0x97, 0xbc, 0xe8, 0x48, 0x38, - 0x90, 0x9b, 0x6e, 0xf1, 0xd2, 0x17, 0x1b, 0xbf, 0x03, 0xc6, 0xa3, 0x42, - 0xaf, 0xdc, 0x44, 0x9d, 0x9e, 0x69, 0x67, 0x33, 0x61, 0xfb, 0x96, 0xfa, - 0xff, 0xf4, 0xa8, 0x3c, 0xb6, 0x42, 0xd2, 0x4c, 0xc0, 0xa8, 0x2a, 0x4b, - 0x37, 0x78, 0x41, 0x94, 0xf6, 0x04, 0xb9, 0x54, 0xe4, 0x2b, 0xfc, 0xed, - 0xf5, 0xf7, 0x62, 0x23, 0x44, 0xc4, 0xd7, 0x5a, 0xeb, 0xc2, 0x3d, 0x4c, - 0x41, 0x22, 0xa0, 0xe3, 0x22, 0xbc, 0x91, 0x69, 0x37, 0x3f, 0x94, 0xfd, - 0x07, 0xa7, 0x6e, 0x53, 0x27, 0xdc, 0xb0, 0x14, 0x8d, 0x0a, 0x08, 0x31, - 0xba, 0xf0, 0xd0, 0xda, 0xa6, 0x7a, 0xc0, 0x4c, 0x9d, 0x3b, 0x8f, 0xee, - 0x11, 0xc7, 0x9f, 0xc9, 0xcc, 0x4c, 0x26, 0x51, 0xb4, 0x10, 0xde, 0xc2, - 0xa3, 0xe0, 0xaa, 0x7c, 0x9c, 0x27, 0x8d, 0x04, 0x8e, 0xfc, 0xe4, 0x68, - 0x93, 0xf9, 0x67, 0x28, 0xa0, 0xe6, 0xca, 0xbd, 0x5a, 0x64, 0x98, 0x9f, - 0xe3, 0x7b, 0x16, 0x5d, 0x61, 0xcc, 0x4c, 0x64, 0x04, 0x1b, 0xcc, 0xa6, - 0xa2, 0x31, 0x28, 0xa2, 0xac, 0xd0, 0xce, 0x40, 0x19, 0xe7, 0xf9, 0xea, - 0xc5, 0x98, 0x50, 0x16, 0x38, 0xad, 0x58, 0x21, 0x2e, 0x10, 0x48, 0x4f, - 0xe7, 0xc0, 0xc0, 0x6c, 0xcd, 0xe2, 0xc3, 0xcd, 0xc5, 0xfc, 0x26, 0x91, - 0xea, 0xcf, 0x52, 0x97, 0x9f, 0xdc, 0x2c, 0x45, 0xd8, 0x50, 0xf8, 0x75, - 0xa2, 0x93, 0x52, 0x2b, 0x23, 0xd3, 0x30, 0x9d, 0xa7, 0xf7, 0xbb, 0xc2, - 0xd2, 0xb7, 0x9d, 0xec, 0xf9, 0x9a, 0xec, 0x3e, 0xc0, 0xce, 0x64, 0xb8, - 
0xf5, 0x41, 0x4e, 0x06, 0xa1, 0x25, 0xf2, 0x40, 0xee, 0x07, 0xec, 0x6d, - 0x9a, 0xd0, 0x5c, 0xdd, 0xe9, 0xf5, 0x56, 0xf9, 0x2e, 0xf5, 0xdb, 0x69, - 0xc9, 0x3e, 0xb5, 0x0c, 0xbc, 0x29, 0xa4, 0xa9, 0x55, 0x9b, 0xf6, 0xab, - 0x1f, 0x55, 0x9d, 0x25, 0xd2, 0xde, 0x3f, 0xa0, 0xe5, 0x1c, 0xb3, 0x90, - 0x2f, 0x6c, 0xaf, 0xb5, 0x6d, 0x23, 0x15, 0xab, 0x91, 0x55, 0x5f, 0x02, - 0x20, 0x22, 0x8e, 0xc1, 0x4a, 0x63, 0xa6, 0x5e, 0x85, 0x99, 0x58, 0xdc, - 0xde, 0xb0, 0x76, 0x9f, 0x21, 0x4d, 0xe9, 0x47, 0xcc, 0x3f, 0x02, 0x91, - 0x75, 0x67, 0xe5, 0x6a, 0x2c, 0xc3, 0x69, 0x95, 0x2d, 0x74, 0x77, 0xf7, - 0x1d, 0xe1, 0x12, 0x2b, 0xcf, 0x4c, 0x7b, 0xcf, 0xbe, 0x24, 0x1d, 0x07, - 0x34, 0xd3, 0x67, 0xa8, 0xb9, 0x76, 0x2a, 0x3e, 0xfd, 0xb5, 0xcd, 0xf6, - 0x29, 0x07, 0x4e, 0x17, 0xcf, 0x28, 0xdd, 0x90, 0x4b, 0x17, 0x24, 0x55, - 0xdc, 0x78, 0xe5, 0xf4, 0x97, 0x31, 0x3d, 0xfa, 0x96, 0xe2, 0x99, 0x61, - 0xb1, 0xcb, 0xa4, 0x7b, 0x4e, 0x5d, 0x6a, 0xf8, 0xb2, 0x79, 0xfc, 0xa9, - 0xd9, 0x27, 0x46, 0xdd, 0x52, 0xdf, 0x24, 0x66, 0x1c, 0xa6, 0xbc, 0x18, - 0x13, 0x72, 0x38, 0x53, 0xac, 0x1b, 0x67, 0x1f, 0x30, 0xae, 0x5a, 0xf3, - 0x55, 0xd0, 0xe1, 0x23, 0x9a, 0x46, 0xa4, 0xbb, 0x68, 0x73, 0x30, 0xda, - 0xb7, 0x3b, 0xff, 0xd1, 0x0d, 0xe0, 0xf7, 0xda, 0x36, 0x3a, 0x7a, 0x19, - 0xf5, 0x2e, 0xf4, 0xda, 0xa4, 0x09, 0x94, 0xb8, 0x18, 0xad, 0x6b, 0xf6, - 0x64, 0xbf, 0x2a, 0x04, 0xc6, 0xde, 0x0f, 0x45, 0x27, 0x3a, 0x3d, 0x61, - 0xf5, 0xde, 0x38, 0x1d, 0x23, 0x23, 0x70, 0x00, 0xfc, 0x0c, 0x5c, 0x96, - 0xc1, 0x21, 0x78, 0x25, 0x24, 0x71, 0xd1, 0xe2, 0xe9, 0x1a, 0x2f, 0x48, - 0x4d, 0x09, 0x24, 0x27, 0xe4, 0xe7, 0x42, 0x76, 0x92, 0x93, 0x7a, 0x62, - 0x76, 0xc6, 0xd7, 0xdf, 0xe4, 0x5e, 0x0e, 0xfc, 0x4e, 0x0a, 0x65, 0x63, - 0x51, 0x90, 0xfd, 0x92, 0x5f, 0x9a, 0x49, 0xa9, 0x6c, 0xb1, 0xb6, 0xe6, - 0xab, 0xf7, 0xb9, 0x39, 0xc0, 0xed, 0x1d, 0x65, 0x9c, 0x24, 0x21, 0xc1, - 0x0d, 0xd6, 0x9a, 0xbe, 0xd4, 0x74, 0xa2, 0x70, 0xab, 0x0b, 0x45, 0xf0, - 0xc9, 0xaa, 0xf1, 0x49, 0x0b, 0x6c, 0x20, 0xdc, 0x37, 0x2b, 0x13, 0x68, - 0x48, 0x0e, 0xd8, 0xd1, 0x67, 0xd8, 0xa3, 0x7e, 0xd7, 0xb7, 0x50, 0xc8, - 0x14, 0x58, 0x6a, 0x04, 0xa5, 0x70, 0x22, 0x2d, 0x41, 0xea, 0x28, 0xb7, - 0xf0, 0xde, 0xc4, 0xe4, 0x5b, 0x4d, 0xc1, 0x33, 0x9e, 0x14, 0x32, 0xa8, - 0x9b, 0xc8, 0xd9, 0x5b, 0x95, 0x2a, 0x91, 0x9d, 0xe8, 0x15, 0x19, 0x9b, - 0x38, 0xf3, 0x35, 0x69, 0x3e, 0xd3, 0x4b, 0xcc, 0xf2, 0x94, 0x5a, 0xaf, - 0x91, 0xa4, 0xa1, 0x03, 0x48, 0x5f, 0x6d, 0x16, 0x56, 0x03, 0x5a, 0xcb, - 0x99, 0x19, 0x45, 0x9c, 0xba, 0xc9, 0xbc, 0x5b, 0x0f, 0xf5, 0xde, 0x70, - 0xa3, 0x70, 0x0d, 0x3f, 0x3e, 0x5c, 0x4d, 0x5a, 0x1a, 0x46, 0x1b, 0x44, - 0x4a, 0x73, 0xfa, 0xb1, 0xc4, 0x42, 0x7b, 0x0c, 0x15, 0x0d, 0x35, 0xc4, - 0xa3, 0xea, 0x17, 0xa0, 0x0b, 0xfb, 0x4d, 0x1b, 0x2f, 0x96, 0x1f, 0xaa, - 0xc0, 0xad, 0xdc, 0xf3, 0xb2, 0xb1, 0x44, 0x1f, 0x39, 0xc7, 0x33, 0x18, - 0xad, 0xe1, 0x50, 0x7d, 0xf9, 0x2a, 0x90, 0xf2, 0x06, 0xce, 0x07, 0xae, - 0x9f, 0xbc, 0x4d, 0xae, 0x30, 0xdd, 0x47, 0xa2, 0xd3, 0x6d, 0x0c, 0xc6, - 0xb7, 0xae, 0xf5, 0x38, 0xa3, 0x00, 0x59, 0x6a, 0x00, 0x04, 0xd2, 0x77, - 0x0a, 0x58, 0xc9, 0xaf, 0x1b, 0x59, 0x29, 0xf3, 0xdd, 0x58, 0xcf, 0xa1, - 0x6d, 0xb4, 0x66, 0x23, 0x9f, 0x9b, 0x41, 0x2a, 0xc8, 0x28, 0x34, 0x77, - 0x3a, 0x1f, 0xa5, 0xde, 0x4b, 0x3f, 0xc7, 0x19, 0xf5, 0xdb, 0x98, 0xc4, - 0x6c, 0x2f, 0x34, 0x20, 0xc9, 0x52, 0x16, 0x60, 0xbc, 0x04, 0xd5, 0xff, - 0x4b, 0x07, 0x28, 0x5a, 0x3a, 0x48, 0x5b, 0x96, 0xee, 0x1f, 0xf1, 0xb4, - 0x9b, 0xb5, 0x64, 0xde, 0x1c, 0xd5, 0x3c, 0x1b, 0x98, 0x11, 0xc7, 0x0b, - 0x97, 0x00, 0x2f, 0x8f, 0xf9, 0x24, 0x4d, 0xba, 0x75, 0x6a, 0xce, 0xd8, - 
0x7a, 0xee, 0x02, 0xd5, 0x19, 0xd6, 0x26, 0x40, 0xa7, 0x78, 0x76, 0x1a, - 0x17, 0xc2, 0xe6, 0x5a, 0x6e, 0x24, 0xb1, 0x17, 0xf8, 0x9f, 0xdc, 0x64, - 0xf0, 0x59, 0xc5, 0xfc, 0x4c, 0xbb, 0x3d, 0x3f, 0x70, 0x2c, 0x0d, 0xf5, - 0x6c, 0x96, 0x46, 0x1a, 0x1e, 0x5f, 0xd1, 0x3a, 0x00, 0x9a, 0x9d, 0x63, - 0xe6, 0xd1, 0xa2, 0x5a, 0x4a, 0x50, 0xa8, 0xd5, 0x91, 0x90, 0x69, 0x58, - 0x65, 0x00, 0xc7, 0xf1, 0xa6, 0x45, 0xfd, 0x5a, 0xe6, 0x05, 0x4b, 0xb2, - 0x3a, 0xdf, 0xa9, 0xd9, 0xe5, 0xa6, 0xe5, 0xe2, 0x5b, 0x3b, 0x2f, 0x57, - 0x6c, 0xc4, 0x06, 0xe1, 0x8e, 0x15, 0x98, 0xc8, 0x5e, 0x63, 0xba, 0x37, - 0xe6, 0x91, 0x5f, 0x1c, 0x5b, 0x77, 0xb5, 0x91, 0x07, 0x3a, 0xa6, 0x67, - 0x6d, 0xdf, 0x15, 0x62, 0x6b, 0x3b, 0xed, 0xa2, 0xc7, 0x46, 0x52, 0x8f, - 0xf2, 0x9f, 0x69, 0x00, 0xb8, 0x49, 0xcf, 0xd4, 0xf0, 0x95, 0x51, 0xda, - 0x0f, 0x4e, 0x0d, 0x11, 0x2f, 0x27, 0x73, 0xe9, 0x13, 0xcb, 0xa1, 0xfc, - 0x6b, 0x45, 0xf0, 0xfd, 0xc7, 0x17, 0xaa, 0x0c, 0xac, 0x98, 0xc4, 0x6c, - 0xf0, 0x32, 0x45, 0x67, 0xfe, 0x6f, 0x2e, 0xfb, 0xec, 0x19, 0xda, 0xbd, - 0x93, 0x5f, 0x50, 0xc2, 0x22, 0x9a, 0x3a, 0x5b, 0x31, 0xf5, 0x4e, 0x91, - 0xa6, 0xea, 0x67, 0xdd, 0x69, 0xf4, 0xd7, 0xea, 0x02, 0xbe, 0x55, 0x52, - 0xb9, 0x30, 0x21, 0xe5, 0xfc, 0x9a, 0x93, 0xd6, 0x6c, 0x33, 0x06, 0xb9, - 0xe3, 0xb0, 0x6a, 0xff, 0x9e, 0xc2, 0x5e, 0x1d, 0xd6, 0xdb, 0xa1, 0x60, - 0x34, 0x5d, 0x08, 0xf9, 0xeb, 0xd6, 0x1f, 0x90, 0xf1, 0xf4, 0x07, 0x47, - 0xbf, 0xd9, 0xc9, 0xe8, 0xcf, 0xce, 0xa5, 0x1d, 0xb0, 0xd9, 0xbe, 0xc7, - 0xfb, 0xcc, 0xac, 0x3e, 0x92, 0x59, 0x0d, 0x1d, 0x65, 0x16, 0xa3, 0xdc, - 0x9b, 0x72, 0x22, 0x46, 0x04, 0xca, 0xb3, 0x5a, 0x2f, 0x3d, 0x99, 0x5c, - 0xb5, 0xb9, 0x30, 0xe3, 0xde, 0x8c, 0xba, 0xc7, 0x4c, 0xe5, 0x34, 0x6e, - 0xf4, 0x75, 0xf4, 0x38, 0x01, 0xf1, 0x61, 0xb8, 0x2b, 0xc3, 0x6f, 0xae, - 0xd1, 0x0a, 0x9d, 0x48, 0xc9, 0xe7, 0xc3, 0xe7, 0xc9, 0xe1, 0x6f, 0x96, - 0xa0, 0xc2, 0x91, 0xfd, 0xad, 0x99, 0x48, 0xde, 0xfc, 0xa3, 0x6e, 0xe3, - 0x94, 0x0e, 0xb5, 0xf6, 0x24, 0x8b, 0xce, 0x70, 0x3c, 0xdc, 0xe2, 0x66, - 0x9f, 0xe3, 0x6b, 0xc5, 0xd1, 0x97, 0x38, 0x12, 0x46, 0x37, 0xd6, 0x9a, - 0x4c, 0x6d, 0x4a, 0x2d, 0xc3, 0x28, 0x20, 0x2f, 0x55, 0x67, 0x17, 0x71, - 0xd3, 0x5c, 0xdc, 0xa3, 0x23, 0x60, 0x25, 0x2d, 0xe0, 0xc2, 0xed, 0xee, - 0x67, 0x9f, 0x26, 0xfb, 0x2f, 0x63, 0xf2, 0x6a, 0x23, 0x45, 0x26, 0x2c, - 0x33, 0x8a, 0xf2, 0xd1, 0xb2, 0x77, 0x99, 0x98, 0xd6, 0x18, 0xfe, 0xf3, - 0xff, 0xa4, 0x36, 0x03, 0xf4, 0xf5, 0xb1, 0xca, 0xa3, 0x5f, 0xe2, 0xc6, - 0xb2, 0x55, 0x2c, 0xaa, 0x64, 0xef, 0x28, 0x3a, 0x9e, 0x98, 0x01, 0x57, - 0x49, 0x98, 0x61, 0x4f, 0x42, 0x57, 0x00, 0x19, 0xb9, 0xa8, 0xec, 0xed, - 0x2b, 0x63, 0xf3, 0x0c, 0x3a, 0x1f, 0x10, 0xab, 0xe9, 0x6e, 0x61, 0x69, - 0xd1, 0x2d, 0xf3, 0x1f, 0xaa, 0x00, 0x57, 0xe2, 0xab, 0x74, 0xcd, 0xff, - 0x97, 0x2c, 0x3b, 0x67, 0xae, 0xa3, 0xfc, 0x69, 0xa9, 0x4e, 0x42, 0x07, - 0xfc, 0xbf, 0x36, 0x1a, 0xef, 0x6d, 0x6d, 0x14, 0x61, 0x30, 0x27, 0x98, - 0xfa, 0xf8, 0xc9, 0x70, 0xb4, 0xaa, 0x53, 0x48, 0x72, 0x3f, 0x58, 0x69, - 0x8d, 0x08, 0xc8, 0x09, 0x2b, 0xfc, 0x1d, 0xa1, 0x92, 0xae, 0x62, 0xa0, - 0xea, 0x05, 0x40, 0xac, 0x9c, 0xaf, 0x0e, 0xf4, 0x1e, 0x45, 0x33, 0xee, - 0x31, 0x39, 0x08, 0x4b, 0x54, 0x02, 0x2d, 0x03, 0x1c, 0xe6, 0x2d, 0x0c, - 0xd0, 0x92, 0x44, 0xd6, 0xa1, 0x57, 0x4e, 0x17, 0xde, 0xe6, 0x4f, 0x6a, - 0x07, 0x9f, 0x58, 0xe2, 0x27, 0xdb, 0xa9, 0x0c, 0x19, 0x56, 0xa3, 0xb4, - 0xc4, 0xe8, 0xa3, 0x52, 0x9f, 0x6a, 0xc9, 0xb1, 0xda, 0xe9, 0xef, 0x12, - 0xc1, 0x6d, 0x5b, 0x04, 0x20, 0x93, 0xac, 0xf4, 0x38, 0x95, 0xdb, 0x50, - 0xa6, 0x2e, 0x5c, 0x3f, 0x2d, 0x32, 0x50, 0x03, 0x73, 0x64, 0x3a, 0xd5, - 
0xfd, 0x98, 0x1c, 0x57, 0xc3, 0xe7, 0xf7, 0x14, 0x13, 0x15, 0x2a, 0xa2, - 0x5f, 0xa0, 0x67, 0xdd, 0x67, 0x00, 0x09, 0xc6, 0xfe, 0xad, 0x06, 0x4c, - 0x5e, 0x9a, 0x5b, 0x55, 0x06, 0x8c, 0x9a, 0x2a, 0x51, 0x0e, 0x4f, 0x15, - 0xcc, 0xe1, 0x53, 0x9c, 0x43, 0x37, 0xc1, 0x3e, 0x02, 0x4b, 0x98, 0x6f, - 0x9b, 0x60, 0x31, 0x2c, 0x2b, 0x9d, 0xda, 0xe0, 0x1d, 0xe4, 0x49, 0x66, - 0x65, 0x18, 0xfb, 0x24, 0x97, 0xe0, 0x2d, 0xf5, 0x44, 0x23, 0x09, 0x01, - 0xf9, 0xf5, 0x29, 0xff, 0x01, 0x36, 0xb9, 0x0e, 0x9b, 0xb3, 0x23, 0x1e, - 0xe5, 0x12, 0xbb, 0x3a, 0x04, 0x14, 0xb8, 0x23, 0x43, 0x95, 0xc1, 0x9d, - 0x57, 0x45, 0x46, 0x4c, 0x8f, 0x35, 0x25, 0x5f, 0x2b, 0xd9, 0xc6, 0xdd, - 0x61, 0xb8, 0xbb, 0x4d, 0x49, 0xef, 0x6e, 0x0c, 0x50, 0x07, 0xc9, 0x9b, - 0x2e, 0xb7, 0xbe, 0x23, 0xc3, 0xcf, 0x9d, 0xeb, 0x13, 0xc8, 0xeb, 0x72, - 0x51, 0x71, 0x69, 0x35, 0xf3, 0xce, 0x35, 0x45, 0x02, 0xba, 0x44, 0x5d, - 0xaf, 0xd0, 0xe5, 0x1d, 0x9b, 0x18, 0xbb, 0x62, 0xce, 0xaf, 0x40, 0x48, - 0x40, 0x2a, 0x5d, 0xcd, 0xa7, 0x2b, 0x8f, 0xf4, 0x4a, 0x4c, 0xe1, 0x59, - 0x40, 0x63, 0x33, 0xae, 0xd8, 0x9d, 0x4d, 0x11, 0x3d, 0x2d, 0x11, 0xc6, - 0x8c, 0xa9, 0xab, 0xa2, 0x08, 0xb8, 0xbf, 0x09, 0x66, 0xbc, 0xd7, 0xab, - 0xce, 0x0d, 0xe0, 0x9e, 0x51, 0x2f, 0x5c, 0xc7, 0x21, 0xb9, 0xcf, 0xc4, - 0x8b, 0xc0, 0x4b, 0x04, 0x1b, 0xfd, 0x43, 0xcf, 0xa4, 0x72, 0x62, 0x04, - 0x0b, 0x1f, 0x9f, 0x35, 0x9d, 0xa9, 0x19, 0x71, 0x06, 0xda, 0x03, 0x0f, - 0xcc, 0x3a, 0xf4, 0x3a, 0xaf, 0x07, 0x0f, 0xf2, 0x3e, 0x4a, 0xd3, 0x41, - 0x6a, 0x90, 0x35, 0x39, 0x4c, 0x1d, 0x2f, 0x05, 0xff, 0xcf, 0xc0, 0xbe, - 0x0f, 0xaf, 0x90, 0x4e, 0x45, 0x8c, 0x78, 0x4d, 0x6b, 0xf2, 0x47, 0x26, - 0xe9, 0x0d, 0xee, 0xd3, 0x97, 0x44, 0xaf, 0x6f, 0x95, 0x30, 0x9c, 0x08, - 0xe5, 0x18, 0x9e, 0xad, 0xd2, 0x2a, 0x0c, 0x21, 0x67, 0x50, 0x28, 0x4f, - 0x31, 0x9c, 0xee, 0xb2, 0x95, 0xbd, 0xef, 0xc0, 0xd0, 0x0d, 0xd4, 0x6e, - 0xff, 0x93, 0x12, 0xc3, 0x51, 0x41, 0xe4, 0x6c, 0x19, 0x09, 0xd7, 0x0a, - 0xe0, 0xea, 0x0a, 0xe7, 0xa8, 0x4b, 0x60, 0xd6, 0x0c, 0x4d, 0xb5, 0x29, - 0x01, 0x74, 0xf9, 0x40, 0x8c, 0x6b, 0x11, 0xf6, 0xe4, 0xc9, 0x3c, 0x1a, - 0xf7, 0xce, 0x2c, 0xd8, 0xe3, 0x0e, 0xc5, 0xb9, 0x6c, 0x40, 0x44, 0xc9, - 0x04, 0xf6, 0x5c, 0xe1, 0x9f, 0xc7, 0xe0, 0x68, 0xe7, 0x6a, 0x92, 0xe7, - 0xb2, 0x12, 0x72, 0x3f, 0xfd, 0xc3, 0x06, 0xeb, 0x0a, 0xab, 0x6d, 0xad, - 0x03, 0x0b, 0x5d, 0xcc, 0x49, 0x04, 0x52, 0x19, 0xd4, 0x9d, 0x67, 0xbf, - 0xd3, 0xf4, 0x22, 0x76, 0x99, 0x52, 0xf5, 0xb5, 0x15, 0x38, 0x58, 0x57, - 0x9a, 0xa2, 0xd1, 0xbb, 0x3a, 0x07, 0xe2, 0xd6, 0x8d, 0x69, 0x9e, 0x5c, - 0xf4, 0xba, 0xda, 0x4a, 0x4d, 0x73, 0xdc, 0x32, 0xfd, 0xe1, 0x3a, 0x16, - 0xf1, 0x09, 0x26, 0x3b, 0x2a, 0xa9, 0xa7, 0x2c, 0xd3, 0xcf, 0x6b, 0xc5, - 0xb5, 0xbc, 0x71, 0xb6, 0x9e, 0xa0, 0x6a, 0x69, 0xa5, 0xeb, 0x54, 0x87, - 0xe9, 0x4f, 0x69, 0x39, 0xc5, 0x54, 0x28, 0x55, 0xb9, 0xff, 0x5d, 0x9e, - 0x17, 0x8e, 0x8c, 0xd5, 0x14, 0x5c, 0xa7, 0x33, 0x5a, 0x2f, 0x2d, 0x37, - 0x0e, 0xf2, 0x54, 0x64, 0x9d, 0xdf, 0x49, 0xab, 0xd3, 0x0f, 0xbd, 0xad, - 0x19, 0xb9, 0xcf, 0x0f, 0x40, 0x62, 0x4b, 0x93, 0xd7, 0xf4, 0x3b, 0xee, - 0x2b, 0x97, 0xe3, 0x55, 0xb3, 0x5b, 0x3f, 0x93, 0xa5, 0xf1, 0x40, 0x99, - 0xa1, 0x69, 0xbd, 0xf3, 0xf0, 0xb1, 0x6e, 0x5c, 0xba, 0x4a, 0xc4, 0x51, - 0x8e, 0xe1, 0x5c, 0xb8, 0x92, 0xb5, 0x43, 0xc4, 0x9e, 0x38, 0x0d, 0xfb, - 0x60, 0xb3, 0xe6, 0x0f, 0x55, 0x94, 0x01, 0xaf, 0xaa, 0xc3, 0x6d, 0xea, - 0xb2, 0xfc, 0xb0, 0x06, 0x29, 0x0f, 0xd3, 0x95, 0xb9, 0xf1, 0x8b, 0xce, - 0xd3, 0x5d, 0x16, 0xbf, 0x5c, 0x24, 0xc5, 0x36, 0x98, 0x8c, 0x5b, 0x43, - 0xe7, 0xfe, 0x77, 0xda, 0xc5, 0xd8, 0xf6, 0x72, 0xba, 0xcf, 0x9c, 0x18, - 
0x58, 0xb8, 0xe4, 0x1d, 0xf6, 0xfb, 0x3b, 0xb4, 0x1f, 0xea, 0xa3, 0xe3, - 0xd5, 0xbe, 0x3f, 0xd5, 0xf9, 0xc4, 0x00, 0x8e, 0x17, 0x22, 0x3d, 0x96, - 0xd8, 0xb6, 0xa5, 0xf6, 0xcd, 0x55, 0x48, 0x8b, 0x1b, 0x38, 0x9c, 0xd7, - 0x6d, 0x40, 0x2a, 0x5f, 0xcf, 0xcb, 0x67, 0xa4, 0x8c, 0xf4, 0x8f, 0x70, - 0x34, 0xeb, 0x70, 0xcd, 0xee, 0x1c, 0xbd, 0xae, 0xd1, 0xc1, 0xf8, 0x62, - 0x45, 0xb5, 0x5d, 0xe6, 0x0b, 0xd4, 0x3d, 0x23, 0xf0, 0x27, 0x44, 0x56, - 0x32, 0x4d, 0xb1, 0x6c, 0x5d, 0x33, 0x94, 0x77, 0xe3, 0xac, 0x54, 0x56, - 0x24, 0x05, 0x26, 0x4a, 0xf0, 0x59, 0xfb, 0x1f, 0xa4, 0x0f, 0xbe, 0x9e, - 0xbc, 0x76, 0x9d, 0x5a, 0xed, 0x15, 0x97, 0x4e, 0x05, 0x8a, 0x8b, 0xff, - 0xc7, 0x9b, 0x67, 0x32, 0x12, 0x41, 0x04, 0xcb, 0x24, 0xae, 0x9e, 0xcc, - 0xd6, 0xc6, 0x67, 0x53, 0xfa, 0x29, 0x37, 0x73, 0xc6, 0xdf, 0xf2, 0x56, - 0x72, 0x06, 0x03, 0xaa, 0x5d, 0x07, 0xac, 0x38, 0xb9, 0x2a, 0x61, 0x02, - 0x24, 0xcf, 0x54, 0x3f, 0x98, 0xb0, 0x5c, 0xba, 0xe3, 0x15, 0x27, 0x52, - 0x63, 0x43, 0x12, 0x62, 0x33, 0x02, 0xb8, 0x69, 0x52, 0x70, 0x6c, 0xc0, - 0x23, 0x37, 0x65, 0x4b, 0xc9, 0xea, 0x98, 0x06, 0xde, 0x3d, 0x59, 0x72, - 0x94, 0x48, 0x60, 0xeb, 0xe7, 0xaa, 0x68, 0x72, 0x22, 0x15, 0x39, 0xf0, - 0x47, 0x43, 0xeb, 0x37, 0xb1, 0x3b, 0x9e, 0x05, 0x12, 0xdb, 0x74, 0x18, - 0xfe, 0x11, 0xcb, 0xae, 0xe0, 0xed, 0x1c, 0xe3, 0x19, 0x71, 0x56, 0xa6, - 0x04, 0xe6, 0x20, 0x62, 0xfd, 0xb1, 0x57, 0x44, 0xca, 0x3f, 0xdf, 0x51, - 0x23, 0x76, 0x3b, 0x70, 0x27, 0x33, 0x62, 0x74, 0x94, 0xff, 0x70, 0xcc, - 0xd4, 0xbf, 0x67, 0x12, 0x17, 0x5f, 0x71, 0xf8, 0x8f, 0x09, 0xca, 0xb5, - 0x49, 0x38, 0xcf, 0x1f, 0x94, 0x9a, 0xe6, 0x76, 0x0e, 0xa6, 0x5a, 0x2c, - 0x36, 0x61, 0x41, 0x2d, 0x14, 0x2f, 0x35, 0xa2, 0xaa, 0x2d, 0xd5, 0x54, - 0x3c, 0x4e, 0xa0, 0x63, 0xa9, 0x9e, 0xe9, 0x65, 0x62, 0xcf, 0x5a, 0x1a, - 0xb9, 0x70, 0xf7, 0xf1, 0x8a, 0xc7, 0x19, 0x6e, 0x34, 0xa0, 0xbb, 0x1b, - 0x76, 0x9b, 0x60, 0x20, 0xfd, 0xff, 0xe1, 0x40, 0x5e, 0xd7, 0x49, 0xd3, - 0x3c, 0x0f, 0x52, 0xae, 0x37, 0x38, 0x1d, 0xd5, 0xd0, 0xe7, 0xd6, 0xfc, - 0x06, 0x3b, 0x50, 0x06, 0x9c, 0xb4, 0x37, 0x9a, 0x53, 0x09, 0x56, 0xa4, - 0xa8, 0x64, 0x70, 0xa7, 0xaf, 0xb9, 0xd9, 0x19, 0xbc, 0x5b, 0x04, 0x07, - 0x68, 0xc0, 0xa4, 0xc0, 0x3d, 0x32, 0x36, 0x94, 0x24, 0xd3, 0x36, 0x1f, - 0xfc, 0xd8, 0x26, 0x49, 0x94, 0xd2, 0x1e, 0x8b, 0x0c, 0x70, 0x6e, 0xd7, - 0xd2, 0x37, 0x8f, 0x13, 0xef, 0x41, 0xdb, 0x53, 0xb5, 0xba, 0xe5, 0xe3, - 0x0c, 0xcd, 0xa3, 0xfa, 0x74, 0x16, 0xd9, 0x42, 0x10, 0xa3, 0xe6, 0x26, - 0xd6, 0x74, 0xbc, 0x17, 0x9b, 0x2e, 0x4c, 0xe2, 0x13, 0x49, 0x0f, 0xc9, - 0xc2, 0x34, 0xae, 0x5b, 0x6b, 0x46, 0xbc, 0xc4, 0x62, 0xa0, 0x4a, 0x18, - 0x62, 0x69, 0x1c, 0xc3, 0x78, 0x36, 0xfa, 0xd9, 0x8d, 0xd0, 0xf9, 0x4f, - 0x56, 0x90, 0x4b, 0xca, 0xc4, 0xdd, 0x64, 0x2c, 0xd1, 0x3c, 0xa8, 0xbe, - 0x62, 0x8f, 0x2a, 0x11, 0x93, 0x71, 0x75, 0x70, 0x43, 0xd0, 0x5f, 0xfb, - 0x36, 0x2b, 0x35, 0x26, 0xda, 0xda, 0x25, 0x3c, 0x17, 0xf2, 0xb7, 0x36, - 0xd7, 0x8d, 0xd1, 0xbc, 0x2f, 0xe7, 0xf8, 0x55, 0x42, 0x2e, 0xe1, 0xc0, - 0x4a, 0xee, 0x3d, 0x5b, 0xc9, 0x69, 0x15, 0xc5, 0x42, 0x03, 0x2c, 0x46, - 0x02, 0x94, 0x91, 0xfb, 0x0f, 0x98, 0x8d, 0x32, 0xdf, 0x0b, 0x19, 0xda, - 0x9f, 0x96, 0x6e, 0x2d, 0xc4, 0xa1, 0x92, 0xc1, 0x73, 0x2f, 0x23, 0x9f, - 0x55, 0xc5, 0xb4, 0x8c, 0xef, 0xf3, 0xa2, 0x94, 0x8f, 0x6c, 0xd8, 0xb1, - 0x9d, 0x0d, 0x17, 0x93, 0x21, 0xd7, 0xae, 0xa8, 0x41, 0xd3, 0xf1, 0x9a, - 0xe3, 0x36, 0xca, 0x5f, 0xa4, 0xd9, 0xaf, 0x34, 0xbf, 0xe6, 0x9e, 0x4c, - 0xf0, 0xd1, 0xb0, 0x8c, 0x8e, 0x76, 0x3d, 0xb3, 0xf7, 0xd9, 0xfb, 0xbf, - 0x72, 0xae, 0xa8, 0x39, 0x00, 0xe5, 0x53, 0x17, 0x6c, 0x4e, 0x06, 0x22, - 
0xc0, 0x10, 0xe7, 0x4d, 0xff, 0x75, 0x03, 0x01, 0x18, 0x46, 0xfd, 0xde, - 0x1e, 0x95, 0x46, 0xb8, 0x5b, 0x36, 0xbc, 0x1d, 0x95, 0x05, 0x8f, 0x5d, - 0x38, 0x41, 0x25, 0x2c, 0x9b, 0x34, 0x75, 0x9b, 0xf0, 0x8b, 0xaf, 0x0d, - 0x2e, 0xc2, 0x1a, 0x03, 0x61, 0xbe, 0xe8, 0x49, 0xbc, 0x9b, 0x45, 0xfb, - 0x35, 0x2b, 0x6c, 0xa1, 0x96, 0xa0, 0x08, 0x0e, 0xca, 0x01, 0xc0, 0x97, - 0xfa, 0xdf, 0x11, 0x1a, 0x0d, 0xf9, 0xc2, 0x5a, 0xe1, 0x4c, 0xb5, 0x37, - 0xff, 0x91, 0xb6, 0x96, 0xbf, 0x62, 0x04, 0x59, 0x69, 0x01, 0x68, 0x66, - 0x52, 0x66, 0x4a, 0x49, 0xe9, 0xe6, 0xe4, 0x44, 0x92, 0x5e, 0xaf, 0xf5, - 0x24, 0xdb, 0x6f, 0x21, 0xf9, 0x21, 0x58, 0x5f, 0xc4, 0xf0, 0x30, 0x90, - 0x68, 0xff, 0x58, 0x5c, 0xbd, 0x6f, 0x58, 0x77, 0xe0, 0x03, 0x68, 0x2a, - 0x1a, 0xa4, 0xd6, 0x9d, 0xd0, 0x38, 0x5a, 0xbd, 0x52, 0xa8, 0xc5, 0xf0, - 0xbc, 0xf2, 0x04, 0x49, 0x0e, 0x1b, 0x1b, 0x93, 0xc0, 0x65, 0xca, 0x05, - 0x42, 0x11, 0x03, 0xd6, 0xd5, 0x2c, 0x4c, 0xcd, 0xed, 0xb4, 0x54, 0xa4, - 0x3d, 0x46, 0x64, 0x4c, 0xc4, 0x8f, 0x0a, 0x95, 0x6a, 0x4f, 0xfb, 0x2e, - 0x1d, 0x5a, 0x8a, 0xcb, 0x31, 0x94, 0x21, 0x54, 0x51, 0xf5, 0x4e, 0x3e, - 0x32, 0x00, 0x12, 0x8e, 0x4c, 0x8c, 0x17, 0x90, 0xea, 0x8d, 0xfe, 0xc3, - 0xfe, 0x69, 0x10, 0xd9, 0x1c, 0x60, 0x91, 0xb6, 0xbb, 0x11, 0xb7, 0x77, - 0x1c, 0x69, 0xec, 0xb5, 0x28, 0x1e, 0x4b, 0xc8, 0xac, 0xe2, 0xe7, 0xe4, - 0xca, 0x1c, 0x6a, 0x16, 0xb8, 0x0a, 0x1c, 0xcb, 0xbd, 0x0e, 0x61, 0xf6, - 0x30, 0xa0, 0xb0, 0x11, 0x57, 0xd0, 0xa0, 0xe5, 0x63, 0xb4, 0x5e, 0x65, - 0x54, 0xbd, 0x2b, 0xcf, 0x92, 0xb3, 0xe2, 0xad, 0xba, 0x6b, 0xd8, 0x8b, - 0xd4, 0xc9, 0x49, 0x6b, 0xe9, 0x6f, 0x30, 0x9a, 0x8d, 0x1a, 0xd2, 0x73, - 0xed, 0x01, 0x20, 0x76, 0x59, 0x3b, 0x63, 0x15, 0xf7, 0x4a, 0x93, 0xf5, - 0xe8, 0xaa, 0x77, 0xf7, 0xee, 0x16, 0x26, 0x6d, 0x6d, 0x1e, 0xb3, 0x04, - 0xd1, 0x36, 0x6d, 0xdb, 0xe1, 0xee, 0xdf, 0x69, 0x0e, 0x28, 0x3b, 0x5a, - 0x37, 0x51, 0x61, 0x10, 0x58, 0xd0, 0x58, 0x75, 0x63, 0x5b, 0x76, 0x3e, - 0x55, 0x0a, 0x07, 0x3e, 0xfe, 0xb9, 0x6e, 0x4c, 0xfc, 0x1b, 0x8a, 0xa5, - 0x03, 0x1a, 0xb9, 0x04, 0x22, 0x60, 0x33, 0x66, 0xda, 0xb7, 0x1c, 0x3a, - 0xb6, 0x92, 0x45, 0x01, 0xc2, 0x73, 0x49, 0x6a, 0x9a, 0x54, 0x10, 0xe2, - 0x36, 0x45, 0xbd, 0x1d, 0x33, 0x2a, 0xd2, 0xc9, 0x70, 0x63, 0x39, 0xcf, - 0xf7, 0x76, 0x70, 0x37, 0xde, 0x23, 0x4c, 0xd2, 0xa1, 0x37, 0x2c, 0x52, - 0xae, 0xa3, 0xfb, 0x45, 0xd0, 0xb9, 0x46, 0x3e, 0x2a, 0xe8, 0xe9, 0x64, - 0xe1, 0x16, 0x30, 0x08, 0x36, 0xcd, 0x9e, 0x15, 0x44, 0xdd, 0x27, 0xa9, - 0x1c, 0x29, 0xf1, 0xa7, 0x20, 0x21, 0x59, 0x61, 0x4c, 0xbe, 0x5e, 0x20, - 0x36, 0xca, 0xb8, 0x6d, 0xb5, 0x0c, 0x29, 0x41, 0xa1, 0xd3, 0x8a, 0x2b, - 0x34, 0xd2, 0x5b, 0x92, 0x12, 0x1f, 0x36, 0x9f, 0x5d, 0x02, 0x2a, 0xca, - 0xac, 0x5b, 0x29, 0x8b, 0x51, 0x3a, 0x65, 0xf5, 0xdf, 0x60, 0x6c, 0x0c, - 0xa7, 0x95, 0x3d, 0x52, 0x13, 0xb4, 0xbd, 0x8c, 0xf1, 0xac, 0xba, 0x3c, - 0x24, 0x6c, 0xc0, 0xdb, 0xa8, 0x5b, 0xd4, 0xdb, 0xf5, 0xcd, 0xaf, 0xdf, - 0x2f, 0xe2, 0x71, 0xcc, 0x00, 0x3a, 0x87, 0xdc, 0x23, 0xdf, 0xa7, 0xb0, - 0xb6, 0xcb, 0xff, 0x1c, 0xe7, 0xfe, 0xa8, 0xa8, 0xea, 0xad, 0x37, 0x58, - 0xfd, 0x58, 0x01, 0xa5, 0xe4, 0x5d, 0xdf, 0x4a, 0x10, 0x0b, 0xc3, 0x5e, - 0xd1, 0x0d, 0x4c, 0x21, 0x0e, 0x51, 0x95, 0x99, 0x58, 0xdf, 0x6d, 0xa8, - 0x8e, 0xf7, 0x51, 0xa6, 0x53, 0x44, 0x6b, 0xb3, 0x00, 0x64, 0xe1, 0x6f, - 0x3d, 0x19, 0x40, 0x30, 0x46, 0x95, 0x9b, 0x39, 0xa5, 0x0d, 0x77, 0xaa, - 0xb1, 0x57, 0x57, 0x08, 0xe0, 0xab, 0xd1, 0xd5, 0x25, 0x59, 0x11, 0x2f, - 0x62, 0xbf, 0x50, 0x95, 0x02, 0x18, 0xdb, 0x2d, 0xbc, 0xdb, 0xfa, 0x3d, - 0x45, 0xab, 0xb5, 0x2e, 0x8e, 0x9b, 0x49, 0xe5, 0x50, 0xbd, 0x1f, 0x1c, - 
0x64, 0xd8, 0x9d, 0x0c, 0x0c, 0xe8, 0xf3, 0x54, 0x49, 0x95, 0x3d, 0x71, - 0xa1, 0x16, 0x98, 0x08, 0x16, 0x37, 0x6a, 0x95, 0xa3, 0xaa, 0xb6, 0xf7, - 0x0e, 0x99, 0x2a, 0x0b, 0x68, 0x49, 0xd1, 0xa4, 0x33, 0x3e, 0x57, 0xfc, - 0xc3, 0x5a, 0xa9, 0x1e, 0xbf, 0xf1, 0x19, 0x2d, 0xee, 0xfa, 0x01, 0xa8, - 0x64, 0x0d, 0x74, 0x54, 0xed, 0x4d, 0xab, 0xad, 0x23, 0x25, 0xde, 0xef, - 0xb4, 0x54, 0xfe, 0x3f, 0xba, 0xe0, 0x0e, 0x76, 0x1b, 0x1a, 0xa9, 0xe3, - 0x53, 0xbd, 0xde, 0x65, 0x6b, 0x08, 0x6d, 0x71, 0x45, 0xb4, 0xf8, 0x9a, - 0x06, 0x3d, 0xae, 0x87, 0x25, 0x51, 0x9d, 0x46, 0x33, 0xf3, 0x77, 0x6d, - 0xb6, 0x5d, 0xbe, 0x08, 0xfc, 0xf5, 0x31, 0xa1, 0xd5, 0x22, 0x19, 0xcd, - 0x66, 0x82, 0x19, 0xf5, 0xf5, 0x29, 0x28, 0x83, 0xa5, 0xa3, 0x30, 0x50, - 0xa1, 0xfb, 0xf6, 0x36, 0x31, 0xbf, 0xb5, 0xc4, 0xe7, 0x99, 0xd5, 0x4f, - 0xf5, 0xb0, 0xf5, 0x9a, 0x12, 0x4e, 0x1b, 0xdb, 0x4d, 0x21, 0x6d, 0xda, - 0xeb, 0x6a, 0x11, 0x55, 0xa2, 0xe2, 0x6a, 0xe9, 0xe8, 0x01, 0xa1, 0x97, - 0x68, 0xc2, 0x30, 0xd2, 0xfa, 0x60, 0xec, 0x4d, 0x54, 0x5b, 0x9e, 0x2d, - 0x97, 0xca, 0x1b, 0xc2, 0xb2, 0x14, 0x3f, 0xaf, 0x23, 0x54, 0xe8, 0x0c, - 0x3c, 0xed, 0x50, 0x32, 0xff, 0x3a, 0x8c, 0xe6, 0xdc, 0x17, 0xad, 0x65, - 0x05, 0x35, 0x28, 0xc9, 0x77, 0x21, 0xb1, 0x9a, 0xec, 0xf1, 0xd6, 0x53, - 0xb9, 0xb3, 0xe0, 0x41, 0x11, 0x85, 0x2e, 0x1a, 0xb5, 0xad, 0xab, 0x9b, - 0xae, 0x69, 0xa0, 0xb1, 0xa0, 0x07, 0x72, 0x8f, 0x4a, 0xd9, 0x5e, 0x1f, - 0x29, 0x9e, 0x4d, 0x0b, 0x9a, 0x82, 0xfe, 0x26, 0xc5, 0x17, 0x5b, 0x51, - 0x46, 0xf2, 0xf7, 0x27, 0xba, 0x06, 0x91, 0x0e, 0xc2, 0x07, 0xb3, 0x1b, - 0x54, 0xad, 0xb5, 0xf5, 0x02, 0xc1, 0x39, 0x6a, 0x2a, 0xd7, 0x46, 0xbf, - 0x3d, 0x39, 0x4e, 0x8e, 0xb1, 0x58, 0xf4, 0x90, 0xa7, 0x08, 0x0e, 0x99, - 0x64, 0x33, 0x3e, 0x1e, 0x09, 0xb7, 0x88, 0xa0, 0x29, 0xb2, 0x0b, 0x5c, - 0x15, 0xd4, 0x36, 0x55, 0x42, 0x48, 0xe7, 0x47, 0xf9, 0xb5, 0x05, 0xcd, - 0x40, 0xde, 0x92, 0x27, 0x11, 0x3b, 0xad, 0x3e, 0x9b, 0x95, 0x38, 0xad, - 0x11, 0xd5, 0x9d, 0x1d, 0x38, 0x60, 0xde, 0x31, 0xe3, 0x40, 0xb2, 0xf2, - 0x8e, 0xb4, 0x03, 0xaa, 0x51, 0x15, 0xe4, 0x36, 0x4d, 0x43, 0x05, 0xbc, - 0x36, 0x82, 0xdf, 0xfc, 0xfd, 0x23, 0x4d, 0xad, 0x9f, 0xf4, 0xce, 0xfb, - 0xaf, 0x46, 0xb3, 0x59, 0x98, 0x91, 0x85, 0x4a, 0xa7, 0x67, 0x70, 0xbd, - 0xca, 0x12, 0x9b, 0x6b, 0x00, 0xe5, 0x82, 0x3c, 0x37, 0x99, 0x8d, 0x6b, - 0x32, 0xaf, 0x08, 0x05, 0x36, 0xd6, 0xd7, 0xfb, 0x65, 0xce, 0x4e, 0x9f, - 0xd5, 0xd1, 0x3a, 0x42, 0xb0, 0x31, 0x62, 0xd0, 0xe2, 0xe5, 0x37, 0xc1, - 0x6d, 0x8a, 0x24, 0xa4, 0x19, 0xc2, 0x59, 0x3c, 0x44, 0xef, 0x96, 0xf6, - 0x35, 0x00, 0xe7, 0xe6, 0x2e, 0x82, 0xa5, 0x4a, 0x2f, 0xa2, 0xfe, 0x1f, - 0x53, 0x52, 0x31, 0x97, 0x47, 0x37, 0x15, 0x26, 0xa7, 0x8d, 0xd3, 0x21, - 0x6a, 0x98, 0x6d, 0xf1, 0xe6, 0x29, 0xf8, 0x9d, 0xaf, 0x5f, 0x3e, 0x3a, - 0xbc, 0x65, 0xb2, 0xd8, 0x41, 0xbc, 0xd6, 0x39, 0x3c, 0xc7, 0x2f, 0x2e, - 0xa3, 0x08, 0x9a, 0x21, 0x05, 0xe0, 0x4c, 0x06, 0x4d, 0x82, 0x68, 0x5d, - 0x4a, 0x9e, 0xca, 0xee, 0x3d, 0x28, 0x45, 0x0e, 0xff, 0xdd, 0xe6, 0x46, - 0xbc, 0xf8, 0x19, 0x5b, 0xda, 0xf4, 0x14, 0xd1, 0x4f, 0x02, 0x6e, 0xf6, - 0x01, 0x2d, 0xd6, 0xb6, 0x8b, 0xf5, 0x9c, 0x4e, 0xee, 0xe7, 0xc8, 0x10, - 0x05, 0xb6, 0x6d, 0x8d, 0x49, 0xe2, 0x04, 0xec, 0x4d, 0x61, 0x67, 0xc2, - 0x19, 0x27, 0xab, 0xe1, 0x0d, 0x29, 0xab, 0xf2, 0xa0, 0xf9, 0x69, 0x0d, - 0x81, 0x29, 0x4d, 0x40, 0x6d, 0xd7, 0xda, 0xb7, 0x9e, 0x0b, 0x90, 0x9c, - 0x9b, 0xeb, 0x59, 0x2c, 0xc9, 0xa4, 0x85, 0x95, 0xe2, 0xda, 0x2d, 0xe4, - 0x60, 0x9a, 0x64, 0x21, 0xbf, 0x1d, 0x57, 0x4d, 0x3e, 0xa0, 0x35, 0x0f, - 0xce, 0xd7, 0xe1, 0x44, 0x63, 0x9e, 0xe8, 0x8e, 0xbd, 0xc8, 0xc1, 0x65, - 
0xe1, 0xd2, 0x09, 0x45, 0xd3, 0xbd, 0x13, 0xb2, 0x1f, 0x46, 0x32, 0xa6, - 0xcd, 0xa3, 0x44, 0x4c, 0x52, 0xa7, 0xe7, 0x54, 0xea, 0xe6, 0xa0, 0xce, - 0x02, 0x8b, 0x69, 0xdb, 0xde, 0xef, 0x5f, 0xcb, 0x6f, 0x6e, 0x0f, 0xf5, - 0x68, 0x42, 0xf4, 0x37, 0x08, 0x1f, 0x87, 0x55, 0xb4, 0xbc, 0x8a, 0x84, - 0x84, 0x10, 0xc6, 0x36, 0x3e, 0x8a, 0x6b, 0x4e, 0xd5, 0xc8, 0x64, 0xcb, - 0xb5, 0xc0, 0xfe, 0x99, 0x66, 0xaa, 0xb1, 0x50, 0xa7, 0x70, 0xd9, 0xa6, - 0x17, 0x2d, 0xd4, 0xad, 0xdf, 0xf2, 0x2f, 0xac, 0xae, 0xae, 0x12, 0xcf, - 0x5b, 0x09, 0xf2, 0x2d, 0xb4, 0x21, 0xc9, 0xd1, 0x58, 0xdb, 0x4e, 0x9b, - 0xe0, 0x32, 0x08, 0xe4, 0x4a, 0xe6, 0x9c, 0x61, 0x25, 0x90, 0x08, 0xf2, - 0xb1, 0xc1, 0x3c, 0x25, 0x0b, 0x5a, 0x03, 0x40, 0xdb, 0x06, 0x5f, 0xd2, - 0x60, 0x8e, 0x0a, 0x5b, 0xc8, 0xa2, 0xcd, 0xac, 0xb3, 0x54, 0x0b, 0xb6, - 0x05, 0x45, 0xd7, 0xa8, 0x8a, 0xfa, 0x8a, 0xba, 0x09, 0x53, 0x81, 0xd7, - 0xf5, 0x40, 0x61, 0x46, 0xf2, 0x22, 0xe4, 0x21, 0xb4, 0x26, 0x41, 0x10, - 0x25, 0x4d, 0x93, 0xc2, 0xa2, 0xae, 0xc3, 0xaa, 0xbe, 0x71, 0xa6, 0xaa, - 0xf7, 0xb1, 0xbf, 0x02, 0x22, 0xe9, 0xd7, 0xfb, 0xaa, 0x1d, 0x5d, 0xf5, - 0xe7, 0x5b, 0x63, 0xf2, 0xe6, 0x5c, 0xd6, 0x24, 0x6d, 0xb5, 0xca, 0xa3, - 0xe7, 0x57, 0x1a, 0xa5, 0xf7, 0x95, 0xc5, 0x92, 0x51, 0x65, 0x68, 0xc5, - 0xe6, 0x27, 0xa9, 0x94, 0x8a, 0xb6, 0xec, 0x0d, 0x9c, 0x51, 0xdf, 0x22, - 0xca, 0xdf, 0x5a, 0xf5, 0xe4, 0xad, 0xf4, 0xfc, 0x1f, 0x68, 0x9f, 0xdb, - 0x40, 0x4e, 0x6a, 0x1e, 0x5a, 0xd8, 0x6c, 0xd6, 0xef, 0xad, 0x64, 0xe7, - 0xcb, 0xfc, 0x44, 0xae, 0xa5, 0x62, 0x65, 0xad, 0x2e, 0x6a, 0x46, 0xcf, - 0x0d, 0xd0, 0x46, 0x5e, 0x87, 0x37, 0xb6, 0xab, 0x70, 0x52, 0xee, 0x5a, - 0xa7, 0x13, 0xa3, 0xc3, 0x4b, 0x62, 0xe7, 0x31, 0x10, 0xed, 0x39, 0x1c, - 0x4a, 0xe3, 0xc1, 0x57, 0xcb, 0x45, 0xe4, 0x89, 0xee, 0x0e, 0x24, 0xc1, - 0xa6, 0xac, 0xd4, 0x0e, 0x9b, 0xe0, 0x26, 0x28, 0x08, 0x2b, 0xe1, 0xc9, - 0x42, 0x37, 0xa3, 0x46, 0xcc, 0x5d, 0x89, 0x10, 0x1f, 0x23, 0xcb, 0x1c, - 0x67, 0xe2, 0x6d, 0xaa, 0x66, 0xa5, 0xf5, 0xea, 0x94, 0x2b, 0x8c, 0xf6, - 0xf4, 0xd3, 0xfb, 0x9c, 0x96, 0x0a, 0x87, 0xaf, 0x5c, 0x19, 0xb4, 0x3b, - 0x26, 0xb2, 0x48, 0x55, 0x97, 0xfd, 0x3a, 0xec, 0x06, 0xe4, 0x58, 0x99, - 0x9a, 0x26, 0x4f, 0xe0, 0x9c, 0x67, 0x09, 0x05, 0x5b, 0x72, 0x8e, 0xd6, - 0xe4, 0x4e, 0xe2, 0x63, 0xb0, 0x9c, 0xf6, 0x92, 0xd3, 0x05, 0x3f, 0xb0, - 0x04, 0x5f, 0x02, 0x97, 0xf4, 0x42, 0x1d, 0x3b, 0x5c, 0x44, 0x00, 0x95, - 0x8b, 0xf5, 0x06, 0x40, 0xbd, 0xb8, 0xf7, 0x4b, 0x4a, 0xfa, 0xf0, 0x04, - 0x04, 0xd0, 0xa5, 0xb9, 0x3a, 0xa0, 0x2d, 0x0c, 0x1b, 0xec, 0x5a, 0x14, - 0xc8, 0x1d, 0x93, 0x86, 0xfd, 0x16, 0x68, 0xf8, 0x16, 0x9b, 0xb4, 0x88, - 0x99, 0x63, 0x0e, 0xd5, 0x20, 0x07, 0x43, 0x28, 0x26, 0xba, 0xf9, 0x97, - 0xed, 0x6b, 0x40, 0xb8, 0x07, 0x73, 0x59, 0xd5, 0x55, 0xa8, 0x64, 0x14, - 0x1c, 0xc5, 0xc0, 0x1f, 0x8d, 0x09, 0xae, 0x9c, 0x66, 0xa1, 0x94, 0xca, - 0x14, 0x46, 0xed, 0x46, 0x46, 0x25, 0x63, 0x5b, 0x2b, 0x95, 0x85, 0x05, - 0xc2, 0xb7, 0xeb, 0x06, 0x30, 0x5a, 0xf6, 0x22, 0x4e, 0x47, 0x1e, 0x0e, - 0x0c, 0xad, 0xd5, 0x11, 0xa8, 0x6a, 0x89, 0xd5, 0x49, 0xd4, 0xfa, 0x43, - 0xb0, 0x32, 0xb0, 0xb9, 0xb3, 0xda, 0x3f, 0x4f, 0xac, 0x4c, 0xc1, 0xa7, - 0x9f, 0xc2, 0xc2, 0x04, 0x70, 0xa2, 0x08, 0x01, 0xeb, 0x10, 0xa4, 0xa5, - 0x4c, 0xcd, 0xb3, 0x81, 0x4e, 0xbe, 0x6c, 0x51, 0x44, 0xf8, 0x82, 0xbd, - 0x42, 0x34, 0xfb, 0xdb, 0xb4, 0x32, 0xd2, 0x93, 0x63, 0x5e, 0xf6, 0x07, - 0x6e, 0x2c, 0xc2, 0xcf, 0xf4, 0x5d, 0x84, 0xe9, 0x5e, 0x5c, 0xa8, 0x39, - 0x28, 0x4a, 0xed, 0x15, 0x1b, 0xea, 0xe6, 0xde, 0x85, 0x92, 0x86, 0xe7, - 0x83, 0x4b, 0x87, 0xf7, 0x23, 0x60, 0xe2, 0x22, 0xd3, 0x32, 0x16, 0x4e, - 
0x2f, 0xde, 0x01, 0x8b, 0x48, 0xea, 0xcd, 0x8a, 0x8b, 0xbc, 0xc6, 0x64, - 0xb2, 0x67, 0x47, 0xf5, 0x98, 0xf8, 0xca, 0xf1, 0x83, 0x66, 0xd7, 0x9a, - 0xef, 0xca, 0x20, 0xc2, 0xec, 0x8c, 0x38, 0xb1, 0x37, 0x13, 0x93, 0x92, - 0xba, 0xa1, 0xee, 0x6a, 0x57, 0x43, 0xaa, 0xdc, 0xdf, 0xa4, 0x3f, 0xc6, - 0xb6, 0xd6, 0x68, 0x54, 0xab, 0x36, 0xe9, 0x0f, 0x6f, 0xd5, 0xa1, 0x1b, - 0xa1, 0x02, 0xc9, 0x41, 0xef, 0x4f, 0x86, 0xcc, 0x1a, 0xfa, 0xd2, 0xdd, - 0x87, 0x04, 0xe0, 0x27, 0x38, 0xcf, 0x91, 0x95, 0xb4, 0x02, 0x10, 0x1d, - 0xc3, 0xcc, 0x6f, 0xaf, 0xbc, 0x94, 0x64, 0x47, 0xbc, 0x37, 0xde, 0xe3, - 0x2e, 0x89, 0x03, 0xb6, 0xd3, 0x28, 0x4a, 0x5e, 0x6d, 0x1e, 0xc5, 0x1a, - 0xa5, 0x0c, 0x92, 0xf7, 0xe2, 0x19, 0xe7, 0x39, 0xf0, 0xf2, 0x49, 0x8b, - 0xe6, 0x99, 0xd8, 0x4b, 0x0d, 0x6e, 0x3f, 0x57, 0x89, 0x9e, 0x0d, 0x34, - 0x4b, 0x52, 0xcd, 0x18, 0x57, 0xc7, 0x8e, 0x48, 0x03, 0x65, 0xd4, 0xdd, - 0xdf, 0x04, 0xf5, 0x39, 0x5e, 0x97, 0xbc, 0xc0, 0xc5, 0x91, 0xe7, 0x9d, - 0xbe, 0x28, 0x4c, 0xe7, 0xf4, 0xa0, 0x34, 0xee, 0xba, 0xa7, 0x8d, 0x52, - 0xc4, 0x07, 0x14, 0xd2, 0x93, 0xb0, 0x1d, 0x61, 0x53, 0x23, 0xc3, 0xe1, - 0xd2, 0xbf, 0xe1, 0xd6, 0x1f, 0x27, 0xcc, 0x8c, 0xe7, 0x0b, 0x09, 0x4f, - 0xe6, 0xa2, 0x41, 0xf4, 0x31, 0xbe, 0x95, 0x17, 0xfb, 0x50, 0xa4, 0xa4, - 0x51, 0x3c, 0x6f, 0xf8, 0x6a, 0xba, 0xac, 0xe4, 0x1e, 0x38, 0x78, 0x18, - 0x58, 0x31, 0x69, 0xc9, 0x52, 0xb0, 0xfc, 0x71, 0x54, 0xad, 0xe2, 0x8e, - 0xa2, 0xf2, 0x8e, 0x58, 0x11, 0x1d, 0xcc, 0x30, 0x74, 0x55, 0x41, 0x02, - 0x9b, 0x2a, 0x2f, 0x17, 0x97, 0xe4, 0x1a, 0xd0, 0xd5, 0x8f, 0x60, 0x10, - 0xdb, 0xc2, 0x69, 0x94, 0x0d, 0xaf, 0x44, 0xd0, 0x95, 0x3d, 0x50, 0xf4, - 0x27, 0x5e, 0xdc, 0x56, 0x5f, 0xa7, 0x4c, 0x41, 0xe5, 0x9e, 0xc8, 0x31, - 0xb0, 0x8e, 0x3f, 0xde, 0xdc, 0x42, 0x24, 0x93, 0x98, 0xce, 0x69, 0x90, - 0x98, 0x73, 0x06, 0xb9, 0x8e, 0xa4, 0x8d, 0x97, 0xb1, 0x41, 0x33, 0x64, - 0x5a, 0xae, 0xe8, 0x2f, 0x5f, 0x99, 0x64, 0x3e, 0xea, 0xd4, 0xbe, 0xa2, - 0x52, 0x2d, 0xc7, 0x56, 0x46, 0xfb, 0x33, 0xd8, 0xde, 0xe6, 0x74, 0xf6, - 0x2e, 0x2a, 0x26, 0xa1, 0x07, 0xcd, 0x3c, 0xca, 0x39, 0x74, 0x61, 0x4a, - 0x53, 0xf7, 0x8c, 0xd7, 0x3c, 0x4f, 0x4f, 0xd9, 0x14, 0x74, 0x56, 0xa8, - 0x3b, 0x3b, 0xe4, 0xe5, 0x70, 0x2e, 0xda, 0xde, 0xcd, 0x65, 0x4f, 0x2e, - 0xb6, 0x76, 0x17, 0x59, 0x6a, 0xaf, 0x0a, 0x24, 0x8c, 0x99, 0x0b, 0x2a, - 0xac, 0x46, 0x74, 0x2c, 0x3b, 0x40, 0x20, 0xad, 0x30, 0xab, 0x63, 0x34, - 0x8f, 0x30, 0x22, 0x50, 0x5c, 0xf8, 0x73, 0x21, 0x3e, 0xeb, 0x16, 0x44, - 0x30, 0xb9, 0x59, 0x0f, 0xf0, 0xe5, 0xb6, 0x6a, 0xde, 0x32, 0x03, 0x28, - 0x3c, 0xc8, 0xc2, 0x8d, 0x6b, 0x72, 0x2f, 0x3e, 0x2b, 0x99, 0xc1, 0xa6, - 0xdf, 0x5a, 0x91, 0x2d, 0x40, 0x39, 0xb2, 0x24, 0x27, 0x25, 0x26, 0x51, - 0xbb, 0xb5, 0x6a, 0x47, 0x38, 0x94, 0x2c, 0x3e, 0xa0, 0x96, 0x19, 0xf7, - 0x99, 0x0c, 0x34, 0x41, 0xb9, 0x0d, 0xad, 0x37, 0xa6, 0x0c, 0x38, 0x9c, - 0xee, 0x03, 0x68, 0x62, 0x76, 0x64, 0x18, 0x63, 0x62, 0x10, 0xd6, 0x2a, - 0xca, 0xdb, 0x73, 0x9b, 0x93, 0x35, 0x29, 0xb0, 0xec, 0x6c, 0xa8, 0x1f, - 0xa6, 0xac, 0xf8, 0xd8, 0xfa, 0x98, 0xc3, 0x02, 0xf0, 0xf5, 0x66, 0x2c, - 0xfc, 0x75, 0xc7, 0xb0, 0x76, 0xfe, 0x0f, 0x92, 0x9b, 0xce, 0xc5, 0xe8, - 0x9a, 0x5e, 0x8f, 0x16, 0x26, 0x8c, 0x97, 0x20, 0x97, 0x36, 0xca, 0x56, - 0xed, 0xf2, 0x05, 0x53, 0xf7, 0x9f, 0x23, 0xbb, 0x1e, 0xdc, 0x5a, 0x94, - 0x0b, 0x1d, 0x0e, 0x55, 0xc7, 0x34, 0xff, 0xd9, 0xa3, 0x37, 0x69, 0x63, - 0x9f, 0x00, 0x0f, 0xa1, 0x5c, 0x1f, 0x50, 0x56, 0x25, 0xf0, 0xb8, 0x0e, - 0x92, 0x70, 0xcd, 0xa0, 0xca, 0x2a, 0xce, 0xa5, 0x21, 0xe7, 0x5b, 0x10, - 0x13, 0xd5, 0x9b, 0x9f, 0x60, 0x1b, 0x3f, 0x21, 0xa9, 0x27, 0xd9, 0xeb, - 
0xdc, 0xe8, 0x05, 0x8e, 0x09, 0x27, 0x4b, 0x8b, 0xb1, 0x3b, 0x07, 0xb1, - 0xe9, 0x55, 0xc4, 0xab, 0x5d, 0x74, 0x11, 0xcf, 0x98, 0x5d, 0x47, 0x58, - 0x9d, 0x08, 0xec, 0x0b, 0x31, 0x69, 0x98, 0xad, 0xd0, 0x93, 0x09, 0xc7, - 0xcc, 0xe3, 0x64, 0x67, 0xef, 0xce, 0x98, 0xf3, 0xc2, 0x69, 0xd4, 0x47, - 0x4d, 0xf7, 0x1a, 0x10, 0xa9, 0x18, 0x35, 0x94, 0xc8, 0xe1, 0xd2, 0xf5, - 0xb5, 0xb4, 0x0b, 0xd7, 0x28, 0xa8, 0x97, 0x9b, 0xbf, 0x90, 0xe5, 0xc6, - 0xde, 0xf7, 0x4f, 0x33, 0xaf, 0x36, 0xe2, 0xa8, 0x65, 0x56, 0xdd, 0xe8, - 0x79, 0xae, 0x68, 0xc1, 0xf3, 0x5b, 0x26, 0x59, 0x53, 0x00, 0x43, 0x4c, - 0x3e, 0xf9, 0x24, 0xc4, 0x8d, 0x73, 0x00, 0x6c, 0xb2, 0x97, 0x56, 0x90, - 0x42, 0xde, 0xba, 0xd6, 0x3a, 0x6d, 0x39, 0x9d, 0xbe, 0x1c, 0xca, 0x24, - 0xbb, 0xba, 0x06, 0xf0, 0x59, 0x74, 0x32, 0x99, 0x1b, 0x02, 0xad, 0xc1, - 0x8b, 0xd4, 0x0b, 0xd8, 0xb7, 0xe7, 0xbd, 0xbd, 0x68, 0x56, 0xc1, 0x1e, - 0xda, 0xa4, 0xfe, 0x6b, 0x94, 0xf3, 0xda, 0x9a, 0x33, 0x01, 0x97, 0xb6, - 0x39, 0xc4, 0xe7, 0x57, 0xee, 0xcf, 0x0e, 0xce, 0x40, 0x7a, 0xd4, 0x4d, - 0x30, 0x6a, 0x57, 0x8f, 0x97, 0x92, 0x59, 0xeb, 0xf2, 0x18, 0x8c, 0x77, - 0xd9, 0x8f, 0x72, 0xff, 0xd5, 0xb2, 0x1f, 0x2e, 0xba, 0xb6, 0x46, 0x1a, - 0x33, 0xe0, 0x74, 0x2a, 0xd7, 0xdb, 0xc7, 0x07, 0x37, 0x2f, 0x55, 0xe2, - 0x70, 0x43, 0xc2, 0xbc, 0x33, 0x03, 0xc9, 0xd4, 0x4e, 0x6e, 0x3e, 0xc9, - 0x67, 0x55, 0xf8, 0x6d, 0x63, 0x9f, 0x6b, 0x3f, 0x5b, 0xc7, 0xe9, 0xb8, - 0x31, 0x04, 0x0b, 0x71, 0x15, 0xcd, 0x34, 0xe4, 0xaf, 0x74, 0x73, 0xea, - 0xbf, 0x20, 0x00, 0x75, 0xd7, 0xa7, 0xf7, 0x9c, 0xf5, 0xa1, 0x28, 0xc7, - 0xfe, 0x6b, 0xa2, 0x36, 0xdc, 0xd4, 0xf0, 0xd7, 0x42, 0x4e, 0xe4, 0x3f, - 0x00, 0x09, 0x3c, 0x5e, 0x1f, 0xc8, 0xfd, 0xb9, 0xd8, 0x90, 0xdb, 0xf4, - 0x41, 0x0b, 0xda, 0x68, 0xe1, 0xe4, 0xb9, 0xfb, 0x36, 0x37, 0xa9, 0x5f, - 0xc9, 0xb6, 0xb8, 0xa4, 0xda, 0x41, 0xaa, 0xab, 0xa8, 0xc8, 0xd3, 0xc6, - 0x6a, 0xbe, 0x03, 0x77, 0xcc, 0x1a, 0x8d, 0x0d, 0xe8, 0xcc, 0x58, 0x46, - 0x71, 0x33, 0x19, 0x62, 0xe5, 0xc4, 0xe3, 0x4a, 0x1d, 0xf7, 0x96, 0xd4, - 0x08, 0xe5, 0xa8, 0x18, 0x40, 0x2d, 0xc5, 0xd7, 0xa7, 0x31, 0xa2, 0x5f, - 0x60, 0xde, 0x21, 0xe5, 0xaa, 0x65, 0x93, 0x0d, 0xdb, 0x55, 0x54, 0x88, - 0xbd, 0x53, 0x8e, 0xe0, 0xa6, 0x23, 0xcd, 0x1d, 0xb7, 0xbd, 0x2a, 0x8c, - 0x0e, 0x67, 0x65, 0xab, 0xda, 0xe9, 0x3b, 0x12, 0xf6, 0x97, 0x4b, 0xe8, - 0x16, 0xf7, 0x09, 0xb6, 0x45, 0x97, 0x16, 0xec, 0xd9, 0xdc, 0x8d, 0x01, - 0xba, 0xb0, 0xb6, 0xdd, 0x59, 0x60, 0xbf, 0x92, 0x92, 0xc3, 0x21, 0x41, - 0x46, 0xcb, 0x5e, 0x6e, 0x99, 0x10, 0x41, 0x45, 0x9a, 0xb9, 0xe0, 0x6d, - 0x22, 0x68, 0xd3, 0x5a, 0xaa, 0x6e, 0xb4, 0xc6, 0x42, 0xa2, 0xad, 0xf1, - 0xf7, 0x0b, 0x3d, 0x29, 0x38, 0xa2, 0x11, 0xf8, 0x57, 0x25, 0xb8, 0x8f, - 0xbc, 0x65, 0xac, 0x0d, 0xf0, 0xb7, 0x5c, 0x95, 0xfb, 0x5d, 0xdb, 0x54, - 0x3d, 0x3e, 0xd6, 0x4f, 0x2a, 0xfe, 0x43, 0xfc, 0x1c, 0xca, 0xb9, 0xb3, - 0x95, 0x06, 0x90, 0xd9, 0x5d, 0x43, 0xc4, 0xe9, 0xbb, 0x17, 0xd6, 0xaf, - 0xf2, 0xb0, 0x24, 0x9d, 0x27, 0xdf, 0xaf, 0xf7, 0x6f, 0xd1, 0x4c, 0xbe, - 0xd0, 0x1d, 0x16, 0x3f, 0xf5, 0x23, 0xdb, 0x52, 0xc4, 0x3b, 0x99, 0x3d, - 0xd5, 0xdc, 0x0b, 0x54, 0x3b, 0xfd, 0x9d, 0x36, 0xf6, 0xd9, 0x63, 0xd4, - 0xc0, 0x8f, 0x9d, 0x00, 0xa6, 0x1e, 0x41, 0x72, 0x18, 0xa6, 0xc5, 0xd0, - 0xb6, 0xdd, 0x10, 0x61, 0x45, 0xe0, 0xdc, 0xcc, 0x92, 0xd3, 0x05, 0x54, - 0x26, 0x2c, 0xcf, 0x94, 0x67, 0xa5, 0xae, 0x62, 0x97, 0x4e, 0x10, 0x2b, - 0xf4, 0x65, 0x89, 0x21, 0x98, 0xad, 0x25, 0x6a, 0x01, 0xa9, 0x4f, 0x57, - 0x2b, 0xbe, 0x3b, 0xcc, 0x34, 0x89, 0xc3, 0xd2, 0xa0, 0xc5, 0x72, 0xd9, - 0x39, 0x3f, 0x45, 0x62, 0x73, 0xda, 0xf3, 0xe7, 0xbf, 0xfd, 0xfe, 0x5b, - 
0xe0, 0xc5, 0x9f, 0xf9, 0xbe, 0x2b, 0x9a, 0xf7, 0xc2, 0xe9, 0x59, 0x73, - 0xc4, 0x0a, 0xfe, 0x73, 0x5b, 0x34, 0xb9, 0xfc, 0x45, 0xb7, 0x4d, 0x39, - 0xc2, 0xcd, 0x5f, 0x33, 0x91, 0xab, 0x48, 0x57, 0x0a, 0x27, 0xf3, 0xd4, - 0xf3, 0xb4, 0x57, 0x04, 0xeb, 0x8a, 0xb2, 0xd4, 0x06, 0x60, 0x09, 0x48, - 0x58, 0xf8, 0x1f, 0x06, 0x8c, 0x2d, 0x55, 0x2b, 0x8d, 0xbb, 0x37, 0xbb, - 0xc5, 0xa3, 0x05, 0x38, 0xf7, 0x47, 0x0a, 0xd9, 0xa8, 0x5a, 0x5b, 0x75, - 0x58, 0xa3, 0x35, 0x01, 0x1a, 0x5c, 0xe3, 0x97, 0xef, 0x04, 0xd9, 0x28, - 0x93, 0xc9, 0x59, 0xfc, 0xc1, 0x9b, 0x25, 0xe8, 0x44, 0x05, 0x17, 0xdc, - 0xe1, 0xb2, 0x06, 0xd6, 0x08, 0xe0, 0x00, 0xe0, 0x06, 0xaf, 0xb6, 0xf8, - 0x63, 0x6c, 0x54, 0x29, 0x7a, 0x25, 0x0c, 0xc4, 0xe7, 0x6c, 0x2b, 0xe8, - 0xe9, 0x06, 0xa4, 0x9e, 0xb0, 0x38, 0xd4, 0xf1, 0x46, 0xb3, 0x93, 0x54, - 0xa7, 0xa1, 0xcd, 0x65, 0x43, 0xe8, 0xc3, 0x03, 0x60, 0x9c, 0x39, 0x02, - 0xea, 0xc5, 0x0c, 0x96, 0xd2, 0x05, 0x0d, 0x1f, 0xc7, 0x04, 0xc4, 0xa3, - 0xc4, 0xc0, 0xa9, 0x0b, 0xc7, 0xa1, 0x3f, 0xdc, 0x35, 0x51, 0x4d, 0xc8, - 0xc2, 0x87, 0x99, 0x3c, 0x46, 0xb3, 0x4e, 0xc9, 0xbf, 0xb3, 0x34, 0x8b, - 0xb7, 0x6f, 0xe5, 0x95, 0x9b, 0x17, 0x20, 0x56, 0xa6, 0x64, 0x4c, 0x77, - 0xdc, 0x0e, 0x28, 0xc3, 0xef, 0xf4, 0x28, 0x47, 0xd4, 0x0c, 0x6a, 0xe1, - 0x75, 0x63, 0xc9, 0xae, 0xe9, 0x36, 0x57, 0xfd, 0x08, 0x2f, 0xb2, 0x0b, - 0x48, 0xd4, 0x04, 0x24, 0x2f, 0x17, 0x03, 0x9e, 0xfe, 0xfd, 0x67, 0x0e, - 0xbe, 0x66, 0xcf, 0x2c, 0xaa, 0x4f, 0x1c, 0x32, 0x2e, 0xa0, 0xfb, 0x55, - 0x40, 0x15, 0x5d, 0x51, 0xca, 0xbe, 0xff, 0xb2, 0xb2, 0x2b, 0x47, 0xee, - 0x37, 0xc8, 0x65, 0xad, 0xda, 0xb9, 0x3a, 0x75, 0x3a, 0x98, 0x1f, 0xcf, - 0xd7, 0x48, 0x56, 0xa2, 0xed, 0xb4, 0x46, 0x60, 0x30, 0x6a, 0x19, 0x5b, - 0x38, 0xc8, 0x0d, 0x3a, 0xc3, 0xe1, 0x34, 0x6e, 0x39, 0x5f, 0xf2, 0x4d, - 0x78, 0x02, 0xba, 0x3c, 0x71, 0x70, 0x75, 0x6c, 0xb0, 0xfa, 0x38, 0xe3, - 0x6b, 0x42, 0x1e, 0x23, 0xcd, 0xe6, 0xf8, 0xc5, 0x9c, 0x24, 0x3d, 0x98, - 0xa8, 0xbb, 0x4a, 0x07, 0x8c, 0xb6, 0xfa, 0x13, 0xd0, 0xfc, 0xc5, 0xdc, - 0xb2, 0xcd, 0x65, 0x59, 0xc2, 0x3a, 0x24, 0x47, 0x1c, 0x53, 0x92, 0x57, - 0x21, 0xf3, 0x26, 0x9b, 0xe9, 0xa5, 0x95, 0x9a, 0xd6, 0xa5, 0xe2, 0xda, - 0x0e, 0xb7, 0xab, 0x9e, 0xee, 0xe3, 0xef, 0x59, 0xd2, 0x88, 0x32, 0x1f, - 0x0d, 0xbf, 0xf2, 0xa4, 0x3b, 0xd7, 0xd5, 0xf2, 0xa4, 0xae, 0x65, 0xab, - 0xb3, 0x72, 0xf6, 0x3b, 0xe8, 0xc5, 0x2b, 0xad, 0xcc, 0xbe, 0x02, 0x95, - 0x63, 0x95, 0x2c, 0x22, 0x74, 0x3a, 0x1b, 0xd5, 0xd1, 0x1d, 0xf8, 0x69, - 0x03, 0x98, 0x70, 0x66, 0x43, 0xb5, 0x6d, 0xd0, 0x27, 0x6a, 0x1c, 0xfc, - 0xf9, 0xaf, 0x71, 0x9b, 0x8c, 0xcb, 0xf8, 0xbd, 0x18, 0xad, 0x5f, 0xb7, - 0xbc, 0xfb, 0xbd, 0xde, 0xb9, 0xdc, 0x54, 0x65, 0x3b, 0xaf, 0xa7, 0x92, - 0xbe, 0x62, 0xdc, 0x25, 0x50, 0x48, 0x78, 0xd4, 0xed, 0xed, 0x96, 0x3f, - 0x53, 0xc5, 0xb5, 0x5f, 0xac, 0xa7, 0x5c, 0x92, 0xd9, 0xfe, 0x3b, 0xcd, - 0xbb, 0x29, 0xa0, 0xe0, 0x1e, 0xb0, 0x92, 0xad, 0x6b, 0x45, 0x29, 0x59, - 0xff, 0x5d, 0x5a, 0xfe, 0x8f, 0x63, 0x86, 0x6d, 0xa4, 0x4a, 0x53, 0xc4, - 0x3e, 0x39, 0xbf, 0xe5, 0x20, 0xbc, 0xd1, 0xdf, 0x59, 0x9c, 0x3a, 0x72, - 0x3b, 0x8f, 0xb2, 0x40, 0xe5, 0x9e, 0xa5, 0x02, 0x35, 0xd0, 0x4d, 0x6f, - 0x7d, 0xd5, 0x4c, 0xde, 0x51, 0x0a, 0x9a, 0x57, 0x43, 0x43, 0xe5, 0x97, - 0x95, 0x4b, 0xb2, 0x6c, 0xaf, 0x92, 0x4e, 0x52, 0x06, 0x0b, 0x72, 0x60, - 0x9e, 0x5c, 0xa1, 0xe3, 0x9b, 0xb3, 0x8c, 0x32, 0xcd, 0xc1, 0x4a, 0x88, - 0xd6, 0x3d, 0xed, 0xe8, 0x42, 0x5d, 0x53, 0xdd, 0x00, 0x52, 0x26, 0x2e, - 0xd5, 0x41, 0xf2, 0xfc, 0x51, 0x40, 0x45, 0xe4, 0x00, 0xe3, 0x1c, 0xfb, - 0x32, 0x33, 0x22, 0xed, 0x15, 0x12, 0x9b, 0xc4, 0x89, 0xd0, 0x0e, 0x95, - 
0xad, 0xfd, 0x04, 0x2e, 0xee, 0x73, 0x06, 0xee, 0x23, 0xe2, 0xd3, 0x3d, - 0x44, 0x62, 0x35, 0xdc, 0x18, 0x9d, 0xf4, 0x9d, 0x92, 0x00, 0x4e, 0x8e, - 0x4e, 0x24, 0xa1, 0x2c, 0xb2, 0xb2, 0x3f, 0xfc, 0xe4, 0x27, 0x43, 0x3b, - 0x59, 0xb4, 0x13, 0xff, 0x57, 0xdf, 0x3d, 0xee, 0x1a, 0xab, 0x8c, 0x51, - 0xd9, 0x96, 0x1f, 0x2b, 0x66, 0x67, 0x42, 0xb6, 0x91, 0xfe, 0x8f, 0x4d, - 0xa6, 0xd3, 0x3b, 0x51, 0x45, 0x35, 0xab, 0xe5, 0x6e, 0x07, 0xed, 0x24, - 0x95, 0x3d, 0x6a, 0x47, 0x3f, 0x4e, 0xe4, 0x13, 0x5f, 0xfc, 0x19, 0xe8, - 0x09, 0x4b, 0x3d, 0xdf, 0x4f, 0xb4, 0xb4, 0xc1, 0x74, 0x31, 0xff, 0x13, - 0x00, 0xaf, 0x07, 0x16, 0xb6, 0x57, 0xfe, 0x6a, 0x37, 0x05, 0x62, 0x01, - 0xa0, 0xfa, 0xe2, 0xe5, 0x57, 0xcb, 0xa4, 0x5a, 0x57, 0xee, 0xd1, 0x5f, - 0x14, 0x23, 0xbe, 0xef, 0x9b, 0x91, 0x0f, 0x97, 0xa8, 0xf2, 0x36, 0xf7, - 0xc3, 0xb6, 0xbe, 0xe5, 0x59, 0x2b, 0x3c, 0xb3, 0x5d, 0x9f, 0x1e, 0x3b, - 0xd3, 0xf7, 0xee, 0x2e, 0xc0, 0x73, 0x6f, 0x2e, 0xfd, 0xc7, 0x3f, 0xfd, - 0x9c, 0xac, 0xbd, 0xa1, 0x8e, 0xcc, 0x59, 0x41, 0xa4, 0x41, 0xd3, 0x39, - 0x28, 0x67, 0x96, 0x14, 0x42, 0xc3, 0x38, 0x96, 0x0d, 0xfc, 0x68, 0x3d, - 0x2e, 0x2f, 0x46, 0x24, 0x66, 0x0d, 0xa6, 0x72, 0xc7, 0x27, 0x66, 0x3c, - 0xad, 0x55, 0xae, 0xbd, 0x34, 0xb4, 0x3b, 0x60, 0x73, 0xa5, 0xaa, 0xd4, - 0x56, 0x0b, 0x61, 0xf5, 0x5c, 0x66, 0x2e, 0x9d, 0x33, 0xfe, 0xfe, 0x7b, - 0x21, 0xbc, 0x36, 0xec, 0x0f, 0x03, 0x28, 0xa4, 0xd6, 0x05, 0x21, 0x30, - 0xf8, 0x3c, 0xd9, 0x3b, 0xaf, 0x5d, 0x92, 0x25, 0xce, 0xac, 0x28, 0xe1, - 0xd1, 0x02, 0x3c, 0x49, 0xe6, 0xed, 0xb7, 0x0e, 0xe7, 0xe7, 0x1e, 0x56, - 0xbf, 0x5d, 0xfd, 0xed, 0xdb, 0x4d, 0x63, 0x03, 0x8c, 0x06, 0x30, 0xfa, - 0x62, 0x78, 0x3f, 0x6e, 0x63, 0x1e, 0xa6, 0x4b, 0x96, 0xe9, 0xe4, 0x2d, - 0x16, 0x51, 0xf2, 0xf1, 0xa7, 0x2a, 0xeb, 0x15, 0xb5, 0xb1, 0x04, 0x9a, - 0xde, 0x77, 0xde, 0xcf, 0xcc, 0x21, 0xd9, 0x30, 0xf1, 0xea, 0xb9, 0xb0, - 0x39, 0xe1, 0x6f, 0xc7, 0x0a, 0xbd, 0x64, 0x75, 0x59, 0xbf, 0x3c, 0xbf, - 0xd0, 0xdb, 0x00, 0xfa, 0x2e, 0x36, 0xcc, 0xb5, 0xd1, 0x20, 0x46, 0xb0, - 0xd7, 0xfc, 0xb1, 0x5b, 0x54, 0x9f, 0xe2, 0xe1, 0xd0, 0x18, 0xa3, 0x51, - 0x62, 0x24, 0x0f, 0xa1, 0xa1, 0x9a, 0x47, 0x33, 0xca, 0xb9, 0x26, 0xb6, - 0x0b, 0x46, 0xd4, 0xb5, 0xc6, 0xbb, 0x72, 0x1e, 0x60, 0xeb, 0xb4, 0x9d, - 0x9f, 0x09, 0x10, 0x12, 0xce, 0x68, 0xa3, 0xb6, 0x8c, 0xce, 0xd7, 0x26, - 0x55, 0xb5, 0x90, 0x08, 0x9f, 0xf2, 0xa8, 0xc0, 0x56, 0xd8, 0xf6, 0x29, - 0x60, 0xe0, 0x73, 0x52, 0x22, 0x6f, 0x35, 0x4e, 0xe7, 0xc5, 0xa3, 0x95, - 0xcd, 0xd0, 0x8e, 0xd3, 0x95, 0xe3, 0x03, 0x04, 0x00, 0x54, 0xeb, 0xef, - 0x27, 0x11, 0xef, 0x38, 0x56, 0x6f, 0xa0, 0xe5, 0x72, 0x2a, 0x97, 0x23, - 0x56, 0xe2, 0x93, 0x21, 0x3f, 0xe2, 0xd6, 0x12, 0xcd, 0x61, 0x50, 0x44, - 0xd3, 0xe3, 0x8d, 0x3f, 0x24, 0x90, 0x6c, 0x53, 0xad, 0x1c, 0xad, 0x03, - 0x0f, 0x89, 0x63, 0xf9, 0xb9, 0xbc, 0xe2, 0x56, 0xdd, 0x16, 0xcf, 0x2d, - 0xa1, 0xda, 0xf9, 0x3f, 0xec, 0xbf, 0xb1, 0xb6, 0xe1, 0xdf, 0x3f, 0x11, - 0x02, 0x76, 0xe9, 0xe2, 0x9f, 0xa2, 0x02, 0xce, 0x3e, 0xf9, 0xcf, 0x4f, - 0xd9, 0x5f, 0x72, 0x5d, 0x51, 0xa7, 0x1d, 0x98, 0xeb, 0x8e, 0x97, 0x98, - 0x39, 0x58, 0x52, 0x11, 0xed, 0x95, 0x3c, 0x94, 0xf0, 0x6c, 0xa2, 0x3e, - 0x5f, 0x5f, 0x05, 0x98, 0xf1, 0x73, 0xab, 0xc7, 0xa8, 0x4b, 0x92, 0x73, - 0xda, 0x59, 0x1d, 0x56, 0x11, 0xc2, 0x38, 0x43, 0xdb, 0x4b, 0xbe, 0x08, - 0xdd, 0xf2, 0x5d, 0x47, 0x26, 0xdc, 0x16, 0xf9, 0x62, 0xf8, 0x92, 0x19, - 0x5c, 0x6f, 0x2b, 0xe1, 0x15, 0x66, 0xfa, 0xdb, 0x3a, 0xe0, 0x92, 0x9c, - 0x70, 0x91, 0x3f, 0xb8, 0xb0, 0x01, 0xc1, 0x44, 0xf6, 0x62, 0x47, 0x37, - 0xe9, 0xd9, 0x4c, 0x0f, 0x99, 0x6a, 0xc4, 0x60, 0x26, 0x2f, 0xc6, 0x43, - 
0x50, 0x62, 0xee, 0x44, 0x21, 0xbd, 0xad, 0x50, 0x2d, 0x58, 0x78, 0xea, - 0x5a, 0x5f, 0x5c, 0xf7, 0x28, 0xa9, 0xdf, 0x0e, 0xd3, 0x67, 0xdf, 0x1f, - 0x4c, 0xd3, 0xe9, 0x5e, 0x0f, 0xa3, 0xb7, 0x56, 0xa5, 0x4e, 0x5f, 0x2a, - 0xb6, 0x14, 0x5e, 0x2f, 0x16, 0x71, 0x48, 0x59, 0x77, 0x6b, 0xf9, 0x6c, - 0x79, 0xba, 0xc4, 0x26, 0x30, 0x44, 0x61, 0x62, 0x60, 0xef, 0x35, 0x95, - 0xe3, 0x77, 0xd5, 0xc8, 0x44, 0xa4, 0xf8, 0x95, 0xba, 0xd1, 0x73, 0x6f, - 0x92, 0xf2, 0xd3, 0x98, 0x4c, 0x8f, 0xe0, 0x2e, 0x27, 0xaa, 0x2f, 0x63, - 0x00, 0x00, 0x00, 0x00, 0x06, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x80, 0x04, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x26, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, - 0x0e, 0xfe, 0xff, 0xff, 0xbb, 0xfd, 0xff, 0xff, 0xe1, 0x05, 0x00, 0x00, - 0x4b, 0x0f, 0x00, 0x00, 0x8e, 0x15, 0x00, 0x00, 0x7f, 0x04, 0x00, 0x00, - 0x02, 0x02, 0x00, 0x00, 0x53, 0xe6, 0xff, 0xff, 0xa6, 0x04, 0x00, 0x00, - 0xdf, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x66, 0xff, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x7f, 0xfd, 0xff, 0xff, 0x3e, 0xf8, 0xff, 0xff, - 0xae, 0x03, 0x00, 0x00, 0x5c, 0xfe, 0xff, 0xff, 0x82, 0xfa, 0xff, 0xff, - 0xbd, 0xf8, 0xff, 0xff, 0x04, 0xfe, 0xff, 0xff, 0x8c, 0xfe, 0xff, 0xff, - 0x9b, 0xf8, 0xff, 0xff, 0x51, 0x02, 0x00, 0x00, 0x19, 0xfe, 0xff, 0xff, - 0x54, 0xfe, 0xff, 0xff, 0x8f, 0xff, 0xff, 0xff, 0xe7, 0xfd, 0xff, 0xff, - 0xc2, 0x07, 0x00, 0x00, 0x36, 0x06, 0x00, 0x00, 0x57, 0xfd, 0xff, 0xff, - 0xa3, 0x03, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x79, 0x03, 0x00, 0x00, - 0x9b, 0xf7, 0xff, 0xff, 0xc7, 0x04, 0x00, 0x00, 0xbf, 0x06, 0x00, 0x00, - 0x86, 0xfe, 0xff, 0xff, 0x20, 0xfb, 0xff, 0xff, 0x90, 0xfc, 0xff, 0xff, - 0x16, 0x00, 0x00, 0x00, 0x8e, 0xff, 0xff, 0xff, 0xa0, 0x03, 0x00, 0x00, - 0xc7, 0xff, 0xff, 0xff, 0x51, 0x01, 0x00, 0x00, 0x24, 0xf8, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xee, 0x01, 0x00, 0x00, - 0xda, 0x02, 0x00, 0x00, 0xa9, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xc4, 0xfe, 0xff, 0xff, 0xfa, 0xfc, 0xff, 0xff, 0xc0, 0xff, 0xff, 0xff, - 0x6a, 0xff, 0xff, 0xff, 0x92, 0x02, 0x00, 0x00, 0xa4, 0xff, 0xff, 0xff, - 0xfd, 0xfe, 0xff, 0xff, 0x4e, 0xfd, 0xff, 0xff, 0x87, 0x00, 0x00, 0x00, - 0x19, 0xfe, 0xff, 0xff, 0x17, 0xff, 0xff, 0xff, 0xa0, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xf4, 0xf3, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x04, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x4d, 0x4c, 0x49, 0x52, - 0x20, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x18, 0x00, 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0xf8, 0x01, 0x00, 0x00, 0xec, 0x01, 0x00, 0x00, - 0xe0, 0x01, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0xa4, 0x01, 0x00, 0x00, 0x4c, 0x01, 0x00, 0x00, - 0xfc, 0x00, 0x00, 0x00, 0xa8, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, - 0x38, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x82, 0xfe, 0xff, 0xff, - 
0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x10, 0x00, 0x07, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x03, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xc8, 0xf4, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0xe6, 0xfe, 0xff, 0xff, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x1a, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, - 0x07, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x01, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x07, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, - 0xc2, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x24, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xb4, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, - 0x07, 0x00, 0x10, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x10, 0x00, 0x06, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x07, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x10, 0x00, 0x04, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xb8, 0x0d, 0x00, 0x00, - 0x64, 0x0c, 0x00, 0x00, 0x64, 0x0a, 0x00, 0x00, 0xe8, 0x09, 0x00, 0x00, - 0x9c, 0x09, 0x00, 0x00, 0x20, 0x09, 0x00, 0x00, 0x6c, 0x07, 0x00, 0x00, - 0x78, 0x04, 0x00, 0x00, 0x74, 0x03, 0x00, 0x00, 0x68, 0x02, 0x00, 0x00, - 0xbc, 0x01, 0x00, 0x00, 0x28, 0x01, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, - 0x54, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xc8, 0xff, 0xff, 0xff, - 0x28, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x14, 0x00, 0x10, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, - 
0x14, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x30, 0xf3, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0x6c, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0x00, 0x00, 0x14, 0xf3, 0xff, 0xff, - 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x13, 0xc2, 0x47, 0x3b, - 0x01, 0x00, 0x00, 0x00, 0x8d, 0xf4, 0xad, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0x15, 0x00, 0xe0, 0xbe, 0x0d, 0x00, 0x00, 0x00, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x38, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0xb0, 0xf3, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x7c, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x80, 0x04, 0x00, 0x00, 0x94, 0xf3, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x6c, 0x02, 0xa5, 0x3a, - 0x01, 0x00, 0x00, 0x00, 0x6a, 0x5d, 0xa4, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x66, 0x6c, 0x61, 0x74, 0x74, - 0x65, 0x6e, 0x2f, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0x04, 0x00, 0x00, - 0x40, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x8c, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x2c, 0xf4, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x6c, 0x02, 0xa5, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x6a, 0x5d, 0xa4, 0x3e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x69, - 0x6e, 0x67, 0x32, 0x64, 0x2f, 0x4d, 0x61, 0x78, 0x50, 0x6f, 0x6f, 0x6c, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xe8, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xec, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xd4, 0xf4, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 
0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x6c, 0x02, 0xa5, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x6a, 0x5d, 0xa4, 0x3e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x83, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, - 0x52, 0x65, 0x6c, 0x75, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, - 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x3b, 0x73, 0x65, 0x71, - 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, - 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x3b, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x42, 0x69, 0x61, 0x73, - 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xf0, 0xf5, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xe4, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x0e, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xdc, 0xf5, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x15, 0xa1, 0x10, 0x3b, 0x01, 0x00, 0x00, 0x00, - 0x74, 0x10, 0x10, 0x3f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7b, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x52, 0x65, - 0x6c, 0x75, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x42, 0x69, 0x61, - 0x73, 0x41, 0x64, 0x64, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x43, - 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, - 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, 0x61, 0x64, - 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x3a, 0xf8, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0xd4, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xac, 0x02, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xc4, 0xf6, 0xff, 0xff, 0x1c, 0x02, 0x00, 0x00, - 0x94, 0x01, 0x00, 0x00, 0x0c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xb9, 0x37, 0x74, 0x3a, 0x8b, 0xfe, 0x77, 0x3a, 0x54, 0xc7, 0x75, 0x3a, - 0xc4, 0x11, 0x78, 0x3a, 0xb9, 0x90, 0x74, 0x3a, 0x3b, 0x97, 0x7b, 0x3a, - 0xe8, 0x57, 0x75, 0x3a, 0x0c, 0x0e, 0x74, 0x3a, 0x76, 0x8b, 0x79, 0x3a, - 0x2b, 0x7b, 0x6d, 0x3a, 0x17, 0xad, 0x71, 0x3a, 0xe4, 0x9b, 0x77, 0x3a, - 0x0b, 0xab, 0x7a, 0x3a, 0x9e, 0x12, 0x75, 0x3a, 0x8c, 0xcf, 0x79, 0x3a, - 0xa0, 0x5a, 0x79, 0x3a, 0x74, 0xc3, 0x78, 0x3a, 0x0e, 0xa9, 0x74, 0x3a, - 0x6b, 0xf8, 0x6f, 0x3a, 0x53, 0xeb, 0x72, 0x3a, 0xff, 0xe2, 0x73, 0x3a, - 0x3b, 0x38, 0x78, 0x3a, 0xed, 0x9e, 0x76, 0x3a, 0x77, 0xbc, 0x6d, 0x3a, - 0x4f, 0xf5, 0x71, 0x3a, 0x17, 0xc9, 0x74, 0x3a, 0x87, 0x84, 0x6b, 0x3a, - 0x4b, 0xc5, 0x78, 0x3a, 0xdd, 0x02, 0x75, 0x3a, 0x0e, 0xcf, 0x78, 0x3a, - 0x14, 0x40, 0x75, 0x3a, 0x2e, 0xca, 0x72, 0x3a, 0x20, 0x00, 0x00, 0x00, - 0x95, 0x2f, 0xef, 0x3d, 0x47, 0x1c, 0xf0, 0x3d, 0xc5, 0xdb, 0xf3, 0x3d, - 0x2e, 0x57, 0xe7, 0x3d, 0x98, 0xa7, 0xf2, 0x3d, 0x98, 0x89, 0xe4, 0x3d, - 0x38, 0x6d, 0xf3, 0x3d, 0x3f, 0x38, 0xe2, 0x3d, 0x91, 0x6f, 0xf0, 0x3d, - 0x35, 0xa0, 0xeb, 0x3d, 0x42, 0x3d, 0xeb, 0x3d, 0xed, 0x89, 0xe7, 0x3d, - 0xb5, 0xb5, 0xf8, 0x3d, 0x79, 0x28, 0xf3, 0x3d, 0xed, 0xdb, 0xf7, 0x3d, - 0xeb, 0x67, 0xf7, 0x3d, 0xed, 0xd1, 0xf6, 0x3d, 0xbc, 0xbf, 0xf2, 0x3d, - 0x7a, 0x18, 0xee, 0x3d, 0x7c, 0x05, 0xf1, 0x3d, 0x63, 0x69, 0xe8, 0x3d, - 0xbb, 0xc0, 0xf1, 0x3d, 0xaf, 0xb1, 0xf4, 0x3d, 0xfe, 0xe0, 0xeb, 0x3d, - 0xb6, 0x60, 0xec, 0x3d, 0x8c, 0x32, 0xf0, 0x3d, 0x7e, 0xad, 0xe9, 0x3d, - 0xc0, 0xd3, 0xf6, 0x3d, 0xd7, 0x18, 0xf3, 0x3d, 0x40, 0x53, 0xf0, 0x3d, - 0x2c, 0xdc, 0xf1, 0x3d, 0x9a, 0xe4, 0xf0, 0x3d, 0x20, 0x00, 0x00, 0x00, - 0x4a, 0x4f, 0xf2, 0xbd, 0x8e, 0x0e, 0xf6, 0xbd, 0x74, 0x46, 0xec, 0xbd, - 0xa0, 0x21, 0xf6, 0xbd, 0x8e, 0x27, 0xf0, 0xbd, 0x0d, 0xa0, 0xf9, 0xbd, - 0x0c, 0x97, 0xec, 0xbd, 0xf0, 0x25, 0xf2, 0xbd, 0x5f, 0x98, 0xf7, 0xbd, - 0x27, 0x8d, 0xe8, 0xbd, 0xbd, 0xc9, 0xef, 0xbd, 0xac, 0xac, 0xf5, 0xbd, - 0x5a, 0x94, 0xed, 0xbd, 0x5a, 0x64, 0xf1, 0xbd, 0x2a, 0xa7, 0xe9, 0xbd, - 0x3c, 0x93, 0xf3, 0xbd, 0xf8, 0x2b, 0xf3, 0xbd, 0xf6, 0x35, 0xed, 0xbd, - 0x94, 0xf4, 0xed, 0xbd, 0x70, 0x94, 0xe9, 0xbd, 0x39, 0xfb, 0xf1, 0xbd, - 0xcb, 0x47, 0xf6, 0xbd, 0x88, 0xb9, 0xe7, 0xbd, 0x49, 0x62, 0xe9, 0xbd, - 0x64, 0x11, 0xf0, 0xbd, 0x85, 0xdf, 0xf2, 0xbd, 0x5c, 0x61, 0xe8, 0xbd, - 0x22, 0x46, 0xf3, 0xbd, 0x5a, 0x8e, 0xf0, 0xbd, 0x70, 0xdd, 0xf6, 0xbd, - 
0x94, 0x55, 0xf3, 0xbd, 0x57, 0xba, 0xf0, 0xbd, 0x1a, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x43, 0x6f, 0x6e, 0x76, - 0x32, 0x44, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x2a, 0xfb, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x94, 0x01, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x6c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xb4, 0xf9, 0xff, 0xff, 0x1c, 0x01, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xe6, 0x69, 0xc5, 0x3a, 0xa0, 0x8d, 0xa8, 0x3a, 0xfe, 0x5c, 0xc1, 0x3a, - 0x84, 0x01, 0xcb, 0x3a, 0xa2, 0xc2, 0xb5, 0x3a, 0x42, 0x01, 0xd1, 0x3a, - 0xd7, 0x01, 0xcc, 0x3a, 0x20, 0xd8, 0xc7, 0x3a, 0x28, 0x80, 0xa4, 0x3a, - 0xd9, 0x25, 0xbe, 0x3a, 0x39, 0x6f, 0xc4, 0x3a, 0x59, 0x6c, 0xcb, 0x3a, - 0xb8, 0x0a, 0xc2, 0x3a, 0x73, 0x3f, 0xca, 0x3a, 0xb9, 0xed, 0xc5, 0x3a, - 0xe9, 0x9f, 0xc1, 0x3a, 0x10, 0x00, 0x00, 0x00, 0x5b, 0x2e, 0x2f, 0x3e, - 0x3e, 0xd9, 0x06, 0x3e, 0x44, 0xda, 0x3f, 0x3e, 0xd3, 0x09, 0x22, 0x3e, - 0x1d, 0x57, 0x34, 0x3e, 0xa4, 0xb6, 0x44, 0x3e, 0xd3, 0x69, 0x4a, 0x3e, - 0x70, 0x48, 0x46, 0x3e, 0x28, 0x37, 0x23, 0x3e, 0xe6, 0xdb, 0x06, 0x3e, - 0x3c, 0x1d, 0x34, 0x3e, 0x36, 0xba, 0x16, 0x3e, 0x24, 0xa4, 0x34, 0x3e, - 0xf4, 0xfb, 0x37, 0x3e, 0xd6, 0x7b, 0x8a, 0x3d, 0x00, 0x85, 0xe3, 0x3d, - 0x10, 0x00, 0x00, 0x00, 0x12, 0xdf, 0x43, 0xbe, 0x85, 0x3c, 0x27, 0xbe, - 0x54, 0xcd, 0x0d, 0xbe, 0x81, 0x6b, 0x49, 0xbe, 0x33, 0xb1, 0xe7, 0xbd, - 0x3f, 0x5f, 0x4f, 0xbe, 0xa1, 0x63, 0x3e, 0xbe, 0xbb, 0xa7, 0xea, 0xbd, - 0x2d, 0x8c, 0x0e, 0xbe, 0x8d, 0xa9, 0x3c, 0xbe, 0x5b, 0xe6, 0x42, 0xbe, - 0x80, 0xd5, 0x49, 0xbe, 0xa3, 0x86, 0x40, 0xbe, 0xf4, 0xaa, 0x48, 0xbe, - 0xde, 0x61, 0x44, 0xbe, 0xa9, 0x1c, 0x40, 0xbe, 0x18, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xda, 0xfc, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x64, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x64, 0xfb, 0xff, 0xff, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x07, 0x72, 0x1e, 0x3a, 0x01, 0x00, 0x00, 0x00, 0x32, 0xe2, 0x9b, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x23, 0x35, 0x9d, 0xbd, 0x17, 0x00, 0x00, 0x00, - 
0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x2f, 0x4d, 0x61, 0x74, 0x4d, 0x75, 0x6c, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x80, 0x04, 0x00, 0x00, - 0x52, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, 0x38, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x66, - 0x6c, 0x61, 0x74, 0x74, 0x65, 0x6e, 0x2f, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x9a, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, 0x68, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x8c, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xfc, 0x41, 0x4c, 0x35, 0x30, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, - 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, - 0x65, 0x4f, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x12, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x01, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x9c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x04, 0xfe, 0xff, 0xff, 0x0c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x03, 0xf9, 0x09, 0x36, 0x3a, 0x1b, 0x0c, 0x36, 0xc6, 0xda, 0x0a, 0x36, - 0x16, 0x26, 0x0c, 0x36, 0x4b, 0x2b, 0x0a, 0x36, 0x60, 0x23, 0x0e, 0x36, - 0xd3, 0x9b, 0x0a, 0x36, 0x78, 0xe1, 0x09, 0x36, 0x78, 0xfb, 0x0c, 0x36, - 0xb6, 0x2a, 0x06, 0x36, 0x6f, 0x89, 0x08, 0x36, 0x7e, 0xe3, 0x0b, 0x36, - 
0xf0, 0x9d, 0x0d, 0x36, 0xae, 0x74, 0x0a, 0x36, 0xef, 0x21, 0x0d, 0x36, - 0xe0, 0xdf, 0x0c, 0x36, 0x79, 0x8a, 0x0c, 0x36, 0x0a, 0x39, 0x0a, 0x36, - 0xbb, 0x92, 0x07, 0x36, 0x39, 0x3d, 0x09, 0x36, 0x25, 0xc9, 0x09, 0x36, - 0xd1, 0x3b, 0x0c, 0x36, 0x93, 0x54, 0x0b, 0x36, 0x9a, 0x4f, 0x06, 0x36, - 0x3c, 0xb2, 0x08, 0x36, 0x23, 0x4b, 0x0a, 0x36, 0xbe, 0x0e, 0x05, 0x36, - 0x83, 0x8b, 0x0c, 0x36, 0xc7, 0x6b, 0x0a, 0x36, 0x07, 0x91, 0x0c, 0x36, - 0x5d, 0x8e, 0x0a, 0x36, 0x7f, 0x2a, 0x09, 0x36, 0x33, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x42, 0x69, 0x61, 0x73, - 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x2c, 0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xec, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xe1, 0x22, 0xc6, 0x36, 0x90, 0x2b, 0xa9, 0x36, 0x2d, 0x12, 0xc2, 0x36, - 0xbc, 0xbf, 0xcb, 0x36, 0xf2, 0x6c, 0xb6, 0x36, 0x19, 0xc5, 0xd1, 0x36, - 0xff, 0xc0, 0xcc, 0x36, 0x62, 0x93, 0xc8, 0x36, 0x4c, 0x1a, 0xa5, 0x36, - 0x05, 0xd8, 0xbe, 0x36, 0x49, 0x27, 0xc5, 0x36, 0xf5, 0x2a, 0xcc, 0x36, - 0x8a, 0xc0, 0xc2, 0x36, 0xf5, 0xfc, 0xca, 0x36, 0x2f, 0xa7, 0xc6, 0x36, - 0x57, 0x55, 0xc2, 0x36, 0x31, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, - 0x64, 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, - 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, - 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x1c, 0x00, - 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x18, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 
0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xf0, 0x77, 0x80, 0x3b, -
0x01, 0x00, 0x00, 0x00, 0xf0, 0xee, 0x7f, 0x3f, 0x01, 0x00, 0x00, 0x00, -
0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x63, 0x6f, 0x6e, 0x76, -
0x32, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x74, -
0x38, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, -
0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, -
0x06, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, -
0x40, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, -
0x04, 0x00, 0x00, 0x00, 0xca, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x06, -
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, -
0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0xe6, 0xff, 0xff, 0xff, -
0x00, 0x00, 0x00, 0x09, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, -
0x06, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x16, 0x0a, 0x00, -
0x0e, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0a, 0x00, 0x00, 0x00, -
0x00, 0x00, 0x00, 0x11, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, -
0x0c, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0a, 0x00, 0x00, 0x00, -
0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00}; -
-const unsigned int kTestConvModelDataSize = 21344; diff --git a/code/components/tfmicro/tensorflow/lite/portable_type_to_tflitetype.h b/code/components/tfmicro/tensorflow/lite/portable_type_to_tflitetype.h index 32423a44..83a0ac6c 100644 --- a/code/components/tfmicro/tensorflow/lite/portable_type_to_tflitetype.h +++ b/code/components/tfmicro/tensorflow/lite/portable_type_to_tflitetype.h @@ -16,8 +16,8 @@ limitations under the License. #define TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ // Most of the definitions have been moved to this subheader so that Micro -// can include it without relying on <string>, which isn't available on all -// platforms. +// can include it without relying on <string> and <complex>, which isn't
+// available on all platforms. // Arduino build defines abs as a macro here. That is invalid C++, and breaks // libc++'s <complex> header, undefine it. @@ -25,7 +25,7 @@ limitations under the License. #undef abs #endif -#include <complex> +#include <cstdint>

#include "tensorflow/lite/c/common.h"

@@ -59,16 +59,16 @@ struct TfLiteTypeToType {}; // Specializations below // No string mapping is included here, since the TF Lite packed representation // doesn't correspond to a C++ type well.
MATCH_TYPE_AND_TFLITE_TYPE(int32_t, kTfLiteInt32); +MATCH_TYPE_AND_TFLITE_TYPE(uint32_t, kTfLiteUInt32); MATCH_TYPE_AND_TFLITE_TYPE(int16_t, kTfLiteInt16); MATCH_TYPE_AND_TFLITE_TYPE(int64_t, kTfLiteInt64); MATCH_TYPE_AND_TFLITE_TYPE(float, kTfLiteFloat32); MATCH_TYPE_AND_TFLITE_TYPE(unsigned char, kTfLiteUInt8); MATCH_TYPE_AND_TFLITE_TYPE(int8_t, kTfLiteInt8); MATCH_TYPE_AND_TFLITE_TYPE(bool, kTfLiteBool); -MATCH_TYPE_AND_TFLITE_TYPE(std::complex, kTfLiteComplex64); -MATCH_TYPE_AND_TFLITE_TYPE(std::complex, kTfLiteComplex128); MATCH_TYPE_AND_TFLITE_TYPE(TfLiteFloat16, kTfLiteFloat16); MATCH_TYPE_AND_TFLITE_TYPE(double, kTfLiteFloat64); +MATCH_TYPE_AND_TFLITE_TYPE(uint64_t, kTfLiteUInt64); } // namespace tflite #endif // TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ diff --git a/code/components/tfmicro/tensorflow/lite/schema/schema_generated.h b/code/components/tfmicro/tensorflow/lite/schema/schema_generated.h index 84442b86..1dc710b9 100644 --- a/code/components/tfmicro/tensorflow/lite/schema/schema_generated.h +++ b/code/components/tfmicro/tensorflow/lite/schema/schema_generated.h @@ -49,6 +49,9 @@ struct TensorT; struct Conv2DOptions; struct Conv2DOptionsT; +struct Conv3DOptions; +struct Conv3DOptionsT; + struct Pool2DOptions; struct Pool2DOptionsT; @@ -358,6 +361,21 @@ struct CumsumOptionsT; struct BroadcastToOptions; struct BroadcastToOptionsT; +struct Rfft2dOptions; +struct Rfft2dOptionsT; + +struct HashtableOptions; +struct HashtableOptionsT; + +struct HashtableFindOptions; +struct HashtableFindOptionsT; + +struct HashtableImportOptions; +struct HashtableImportOptionsT; + +struct HashtableSizeOptions; +struct HashtableSizeOptionsT; + struct OperatorCode; struct OperatorCodeT; @@ -395,11 +413,15 @@ enum TensorType { TensorType_INT8 = 9, TensorType_FLOAT64 = 10, TensorType_COMPLEX128 = 11, + TensorType_UINT64 = 12, + TensorType_RESOURCE = 13, + TensorType_VARIANT = 14, + TensorType_UINT32 = 15, TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_COMPLEX128 + TensorType_MAX = TensorType_UINT32 }; -inline const TensorType (&EnumValuesTensorType())[12] { +inline const TensorType (&EnumValuesTensorType())[16] { static const TensorType values[] = { TensorType_FLOAT32, TensorType_FLOAT16, @@ -412,13 +434,17 @@ inline const TensorType (&EnumValuesTensorType())[12] { TensorType_COMPLEX64, TensorType_INT8, TensorType_FLOAT64, - TensorType_COMPLEX128 + TensorType_COMPLEX128, + TensorType_UINT64, + TensorType_RESOURCE, + TensorType_VARIANT, + TensorType_UINT32 }; return values; } inline const char * const *EnumNamesTensorType() { - static const char * const names[13] = { + static const char * const names[17] = { "FLOAT32", "FLOAT16", "INT32", @@ -431,13 +457,17 @@ inline const char * const *EnumNamesTensorType() { "INT8", "FLOAT64", "COMPLEX128", + "UINT64", + "RESOURCE", + "VARIANT", + "UINT32", nullptr }; return names; } inline const char *EnumNameTensorType(TensorType e) { - if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_COMPLEX128)) return ""; + if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_UINT32)) return ""; const size_t index = static_cast(e); return EnumNamesTensorType()[index]; } @@ -800,11 +830,20 @@ enum BuiltinOperator { BuiltinOperator_CUMSUM = 128, BuiltinOperator_CALL_ONCE = 129, BuiltinOperator_BROADCAST_TO = 130, + BuiltinOperator_RFFT2D = 131, + BuiltinOperator_CONV_3D = 132, + BuiltinOperator_IMAG = 133, + BuiltinOperator_REAL = 134, + BuiltinOperator_COMPLEX_ABS = 135, + BuiltinOperator_HASHTABLE = 136, + 
BuiltinOperator_HASHTABLE_FIND = 137, + BuiltinOperator_HASHTABLE_IMPORT = 138, + BuiltinOperator_HASHTABLE_SIZE = 139, BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_BROADCAST_TO + BuiltinOperator_MAX = BuiltinOperator_HASHTABLE_SIZE }; -inline const BuiltinOperator (&EnumValuesBuiltinOperator())[131] { +inline const BuiltinOperator (&EnumValuesBuiltinOperator())[140] { static const BuiltinOperator values[] = { BuiltinOperator_ADD, BuiltinOperator_AVERAGE_POOL_2D, @@ -936,13 +975,22 @@ inline const BuiltinOperator (&EnumValuesBuiltinOperator())[131] { BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES, BuiltinOperator_CUMSUM, BuiltinOperator_CALL_ONCE, - BuiltinOperator_BROADCAST_TO + BuiltinOperator_BROADCAST_TO, + BuiltinOperator_RFFT2D, + BuiltinOperator_CONV_3D, + BuiltinOperator_IMAG, + BuiltinOperator_REAL, + BuiltinOperator_COMPLEX_ABS, + BuiltinOperator_HASHTABLE, + BuiltinOperator_HASHTABLE_FIND, + BuiltinOperator_HASHTABLE_IMPORT, + BuiltinOperator_HASHTABLE_SIZE }; return values; } inline const char * const *EnumNamesBuiltinOperator() { - static const char * const names[132] = { + static const char * const names[141] = { "ADD", "AVERAGE_POOL_2D", "CONCATENATION", @@ -1074,13 +1122,22 @@ inline const char * const *EnumNamesBuiltinOperator() { "CUMSUM", "CALL_ONCE", "BROADCAST_TO", + "RFFT2D", + "CONV_3D", + "IMAG", + "REAL", + "COMPLEX_ABS", + "HASHTABLE", + "HASHTABLE_FIND", + "HASHTABLE_IMPORT", + "HASHTABLE_SIZE", nullptr }; return names; } inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { - if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_BROADCAST_TO)) return ""; + if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_HASHTABLE_SIZE)) return ""; const size_t index = static_cast(e); return EnumNamesBuiltinOperator()[index]; } @@ -1191,11 +1248,17 @@ enum BuiltinOptions { BuiltinOptions_CumsumOptions = 102, BuiltinOptions_CallOnceOptions = 103, BuiltinOptions_BroadcastToOptions = 104, + BuiltinOptions_Rfft2dOptions = 105, + BuiltinOptions_Conv3DOptions = 106, + BuiltinOptions_HashtableOptions = 107, + BuiltinOptions_HashtableFindOptions = 108, + BuiltinOptions_HashtableImportOptions = 109, + BuiltinOptions_HashtableSizeOptions = 110, BuiltinOptions_MIN = BuiltinOptions_NONE, - BuiltinOptions_MAX = BuiltinOptions_BroadcastToOptions + BuiltinOptions_MAX = BuiltinOptions_HashtableSizeOptions }; -inline const BuiltinOptions (&EnumValuesBuiltinOptions())[105] { +inline const BuiltinOptions (&EnumValuesBuiltinOptions())[111] { static const BuiltinOptions values[] = { BuiltinOptions_NONE, BuiltinOptions_Conv2DOptions, @@ -1301,13 +1364,19 @@ inline const BuiltinOptions (&EnumValuesBuiltinOptions())[105] { BuiltinOptions_BatchMatMulOptions, BuiltinOptions_CumsumOptions, BuiltinOptions_CallOnceOptions, - BuiltinOptions_BroadcastToOptions + BuiltinOptions_BroadcastToOptions, + BuiltinOptions_Rfft2dOptions, + BuiltinOptions_Conv3DOptions, + BuiltinOptions_HashtableOptions, + BuiltinOptions_HashtableFindOptions, + BuiltinOptions_HashtableImportOptions, + BuiltinOptions_HashtableSizeOptions }; return values; } inline const char * const *EnumNamesBuiltinOptions() { - static const char * const names[106] = { + static const char * const names[112] = { "NONE", "Conv2DOptions", "DepthwiseConv2DOptions", @@ -1413,13 +1482,19 @@ inline const char * const *EnumNamesBuiltinOptions() { "CumsumOptions", "CallOnceOptions", "BroadcastToOptions", + "Rfft2dOptions", + "Conv3DOptions", + "HashtableOptions", + 
"HashtableFindOptions", + "HashtableImportOptions", + "HashtableSizeOptions", nullptr }; return names; } inline const char *EnumNameBuiltinOptions(BuiltinOptions e) { - if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_BroadcastToOptions)) return ""; + if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_HashtableSizeOptions)) return ""; const size_t index = static_cast(e); return EnumNamesBuiltinOptions()[index]; } @@ -1844,6 +1919,30 @@ template<> struct BuiltinOptionsTraits { static const BuiltinOptions enum_value = BuiltinOptions_BroadcastToOptions; }; +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Rfft2dOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Conv3DOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableFindOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableImportOptions; +}; + +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableSizeOptions; +}; + struct BuiltinOptionsUnion { BuiltinOptions type; void *value; @@ -2708,6 +2807,54 @@ struct BuiltinOptionsUnion { return type == BuiltinOptions_BroadcastToOptions ? reinterpret_cast(value) : nullptr; } + tflite::Rfft2dOptionsT *AsRfft2dOptions() { + return type == BuiltinOptions_Rfft2dOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Rfft2dOptionsT *AsRfft2dOptions() const { + return type == BuiltinOptions_Rfft2dOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Conv3DOptionsT *AsConv3DOptions() { + return type == BuiltinOptions_Conv3DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Conv3DOptionsT *AsConv3DOptions() const { + return type == BuiltinOptions_Conv3DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableOptionsT *AsHashtableOptions() { + return type == BuiltinOptions_HashtableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableOptionsT *AsHashtableOptions() const { + return type == BuiltinOptions_HashtableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableFindOptionsT *AsHashtableFindOptions() { + return type == BuiltinOptions_HashtableFindOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableFindOptionsT *AsHashtableFindOptions() const { + return type == BuiltinOptions_HashtableFindOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableImportOptionsT *AsHashtableImportOptions() { + return type == BuiltinOptions_HashtableImportOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableImportOptionsT *AsHashtableImportOptions() const { + return type == BuiltinOptions_HashtableImportOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() { + return type == BuiltinOptions_HashtableSizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() const { + return type == BuiltinOptions_HashtableSizeOptions ? 
+ reinterpret_cast(value) : nullptr; + } }; bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); @@ -3904,6 +4051,144 @@ inline flatbuffers::Offset CreateConv2DOptions( flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct Conv3DOptionsT : public flatbuffers::NativeTable { + typedef Conv3DOptions TableType; + tflite::Padding padding; + int32_t stride_d; + int32_t stride_w; + int32_t stride_h; + tflite::ActivationFunctionType fused_activation_function; + int32_t dilation_d_factor; + int32_t dilation_w_factor; + int32_t dilation_h_factor; + Conv3DOptionsT() + : padding(tflite::Padding_SAME), + stride_d(0), + stride_w(0), + stride_h(0), + fused_activation_function(tflite::ActivationFunctionType_NONE), + dilation_d_factor(1), + dilation_w_factor(1), + dilation_h_factor(1) { + } +}; + +struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv3DOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_D = 6, + VT_STRIDE_W = 8, + VT_STRIDE_H = 10, + VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_D_FACTOR = 14, + VT_DILATION_W_FACTOR = 16, + VT_DILATION_H_FACTOR = 18 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_d() const { + return GetField(VT_STRIDE_D, 0); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_d_factor() const { + return GetField(VT_DILATION_D_FACTOR, 1); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING) && + VerifyField(verifier, VT_STRIDE_D) && + VerifyField(verifier, VT_STRIDE_W) && + VerifyField(verifier, VT_STRIDE_H) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && + VerifyField(verifier, VT_DILATION_D_FACTOR) && + VerifyField(verifier, VT_DILATION_W_FACTOR) && + VerifyField(verifier, VT_DILATION_H_FACTOR) && + verifier.EndTable(); + } + Conv3DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Conv3DOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(Conv3DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_d(int32_t stride_d) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_D, stride_d, 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + 
fbb_.AddElement(Conv3DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_d_factor(int32_t dilation_d_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_D_FACTOR, dilation_d_factor, 1); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit Conv3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Conv3DOptionsBuilder &operator=(const Conv3DOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv3DOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_d = 0, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_d_factor = 1, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1) { + Conv3DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_dilation_d_factor(dilation_d_factor); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_stride_d(stride_d); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +flatbuffers::Offset CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + struct Pool2DOptionsT : public flatbuffers::NativeTable { typedef Pool2DOptions TableType; tflite::Padding padding; @@ -6206,22 +6491,29 @@ flatbuffers::Offset CreateEmbeddingLookupSparseOpt struct GatherOptionsT : public flatbuffers::NativeTable { typedef GatherOptions TableType; int32_t axis; + int32_t batch_dims; GatherOptionsT() - : axis(0) { + : axis(0), + batch_dims(0) { } }; struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef GatherOptionsT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4 + VT_AXIS = 4, + VT_BATCH_DIMS = 6 }; int32_t axis() const { return GetField(VT_AXIS, 0); } + int32_t batch_dims() const { + return GetField(VT_BATCH_DIMS, 0); + } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_AXIS) && + VerifyField(verifier, VT_BATCH_DIMS) && verifier.EndTable(); } GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -6235,6 +6527,9 @@ struct GatherOptionsBuilder { void add_axis(int32_t axis) { fbb_.AddElement(GatherOptions::VT_AXIS, axis, 0); } + void add_batch_dims(int32_t batch_dims) { + fbb_.AddElement(GatherOptions::VT_BATCH_DIMS, batch_dims, 0); + } explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -6249,8 +6544,10 @@ struct GatherOptionsBuilder { inline flatbuffers::Offset CreateGatherOptions( flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0) { + int32_t axis = 0, + int32_t batch_dims = 0) { GatherOptionsBuilder builder_(_fbb); + builder_.add_batch_dims(batch_dims); 
builder_.add_axis(axis); return builder_.Finish(); } @@ -9398,9 +9695,11 @@ struct BatchMatMulOptionsT : public flatbuffers::NativeTable { typedef BatchMatMulOptions TableType; bool adj_x; bool adj_y; + bool asymmetric_quantize_inputs; BatchMatMulOptionsT() : adj_x(false), - adj_y(false) { + adj_y(false), + asymmetric_quantize_inputs(false) { } }; @@ -9408,7 +9707,8 @@ struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BatchMatMulOptionsT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_ADJ_X = 4, - VT_ADJ_Y = 6 + VT_ADJ_Y = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 }; bool adj_x() const { return GetField(VT_ADJ_X, 0) != 0; @@ -9416,10 +9716,14 @@ struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { bool adj_y() const { return GetField(VT_ADJ_Y, 0) != 0; } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ADJ_X) && VerifyField(verifier, VT_ADJ_Y) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && verifier.EndTable(); } BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -9436,6 +9740,9 @@ struct BatchMatMulOptionsBuilder { void add_adj_y(bool adj_y) { fbb_.AddElement(BatchMatMulOptions::VT_ADJ_Y, static_cast(adj_y), 0); } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -9451,8 +9758,10 @@ struct BatchMatMulOptionsBuilder { inline flatbuffers::Offset CreateBatchMatMulOptions( flatbuffers::FlatBufferBuilder &_fbb, bool adj_x = false, - bool adj_y = false) { + bool adj_y = false, + bool asymmetric_quantize_inputs = false) { BatchMatMulOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); builder_.add_adj_y(adj_y); builder_.add_adj_x(adj_x); return builder_.Finish(); @@ -9566,6 +9875,244 @@ inline flatbuffers::Offset CreateBroadcastToOptions( flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +struct Rfft2dOptionsT : public flatbuffers::NativeTable { + typedef Rfft2dOptions TableType; + Rfft2dOptionsT() { + } +}; + +struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Rfft2dOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + Rfft2dOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Rfft2dOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + Rfft2dOptionsBuilder &operator=(const Rfft2dOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRfft2dOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + Rfft2dOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableOptionsT : public flatbuffers::NativeTable { + typedef HashtableOptions TableType; + int32_t table_id; + tflite::TensorType key_dtype; + tflite::TensorType value_dtype; + HashtableOptionsT() + : table_id(0), + key_dtype(tflite::TensorType_FLOAT32), + value_dtype(tflite::TensorType_FLOAT32) { + } +}; + +struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableOptionsT NativeTableType; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TABLE_ID = 4, + VT_KEY_DTYPE = 6, + VT_VALUE_DTYPE = 8 + }; + int32_t table_id() const { + return GetField(VT_TABLE_ID, 0); + } + tflite::TensorType key_dtype() const { + return static_cast(GetField(VT_KEY_DTYPE, 0)); + } + tflite::TensorType value_dtype() const { + return static_cast(GetField(VT_VALUE_DTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TABLE_ID) && + VerifyField(verifier, VT_KEY_DTYPE) && + VerifyField(verifier, VT_VALUE_DTYPE) && + verifier.EndTable(); + } + HashtableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_table_id(int32_t table_id) { + fbb_.AddElement(HashtableOptions::VT_TABLE_ID, table_id, 0); + } + void add_key_dtype(tflite::TensorType key_dtype) { + fbb_.AddElement(HashtableOptions::VT_KEY_DTYPE, static_cast(key_dtype), 0); + } + void add_value_dtype(tflite::TensorType value_dtype) { + fbb_.AddElement(HashtableOptions::VT_VALUE_DTYPE, static_cast(value_dtype), 0); + } + explicit HashtableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + HashtableOptionsBuilder &operator=(const HashtableOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t table_id = 0, + tflite::TensorType key_dtype = tflite::TensorType_FLOAT32, + tflite::TensorType value_dtype = tflite::TensorType_FLOAT32) { + HashtableOptionsBuilder builder_(_fbb); + builder_.add_table_id(table_id); + builder_.add_value_dtype(value_dtype); + builder_.add_key_dtype(key_dtype); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableFindOptionsT : public flatbuffers::NativeTable { + typedef HashtableFindOptions TableType; + HashtableFindOptionsT() { + } +}; + +struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef 
HashtableFindOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HashtableFindOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableFindOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableFindOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + HashtableFindOptionsBuilder &operator=(const HashtableFindOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableFindOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableFindOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableImportOptionsT : public flatbuffers::NativeTable { + typedef HashtableImportOptions TableType; + HashtableImportOptionsT() { + } +}; + +struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableImportOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HashtableImportOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableImportOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableImportOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + HashtableImportOptionsBuilder &operator=(const HashtableImportOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableImportOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableImportOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableSizeOptionsT : public flatbuffers::NativeTable { + typedef HashtableSizeOptions TableType; + HashtableSizeOptionsT() { + } +}; + +struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableSizeOptionsT NativeTableType; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HashtableSizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableSizeOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableSizeOptionsBuilder { + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableSizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + HashtableSizeOptionsBuilder &operator=(const HashtableSizeOptionsBuilder &); + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableSizeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableSizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + struct OperatorCodeT : public flatbuffers::NativeTable { typedef OperatorCode TableType; int8_t deprecated_builtin_code; @@ -10028,6 +10575,24 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const { return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast(builtin_options()) : nullptr; } + const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::Conv3DOptions *builtin_options_as_Conv3DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Conv3DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableOptions *builtin_options_as_HashtableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableFindOptions *builtin_options_as_HashtableFindOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableFindOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableImportOptions *builtin_options_as_HashtableImportOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableImportOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableSizeOptions *builtin_options_as_HashtableSizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableSizeOptions ? 
static_cast(builtin_options()) : nullptr; + } const flatbuffers::Vector *custom_options() const { return GetPointer *>(VT_CUSTOM_OPTIONS); } @@ -10480,6 +11045,30 @@ template<> inline const tflite::BroadcastToOptions *Operator::builtin_options_as return builtin_options_as_BroadcastToOptions(); } +template<> inline const tflite::Rfft2dOptions *Operator::builtin_options_as() const { + return builtin_options_as_Rfft2dOptions(); +} + +template<> inline const tflite::Conv3DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Conv3DOptions(); +} + +template<> inline const tflite::HashtableOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableOptions(); +} + +template<> inline const tflite::HashtableFindOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableFindOptions(); +} + +template<> inline const tflite::HashtableImportOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableImportOptions(); +} + +template<> inline const tflite::HashtableSizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableSizeOptions(); +} + struct OperatorBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; @@ -11514,6 +12103,53 @@ inline flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatB _dilation_h_factor); } +inline Conv3DOptionsT *Conv3DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Conv3DOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_d(); _o->stride_d = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_d_factor(); _o->dilation_d_factor = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } +} + +inline flatbuffers::Offset Conv3DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConv3DOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_d = _o->stride_d; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_d_factor = _o->dilation_d_factor; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateConv3DOptions( + _fbb, + _padding, + _stride_d, + _stride_w, + _stride_h, + _fused_activation_function, + _dilation_d_factor, + _dilation_w_factor, + _dilation_h_factor); +} + inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = new Pool2DOptionsT(); UnPackTo(_o, _resolver); @@ -12508,6 +13144,7 @@ inline void 
GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resol (void)_o; (void)_resolver; { auto _e = axis(); _o->axis = _e; } + { auto _e = batch_dims(); _o->batch_dims = _e; } } inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { @@ -12519,9 +13156,11 @@ inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatB (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _axis = _o->axis; + auto _batch_dims = _o->batch_dims; return tflite::CreateGatherOptions( _fbb, - _axis); + _axis, + _batch_dims); } inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { @@ -14164,6 +14803,7 @@ inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuff (void)_resolver; { auto _e = adj_x(); _o->adj_x = _e; } { auto _e = adj_y(); _o->adj_y = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } } inline flatbuffers::Offset BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { @@ -14176,10 +14816,12 @@ inline flatbuffers::Offset CreateBatchMatMulOptions(flatbuff struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _adj_x = _o->adj_x; auto _adj_y = _o->adj_y; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; return tflite::CreateBatchMatMulOptions( _fbb, _adj_x, - _adj_y); + _adj_y, + _asymmetric_quantize_inputs); } inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { @@ -14234,6 +14876,130 @@ inline flatbuffers::Offset CreateBroadcastToOptions(flatbuff _fbb); } +inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new Rfft2dOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset Rfft2dOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRfft2dOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRfft2dOptions( + _fbb); +} + +inline HashtableOptionsT *HashtableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new HashtableOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = table_id(); _o->table_id = _e; } + { auto _e = key_dtype(); _o->key_dtype = _e; } + { auto _e = value_dtype(); _o->value_dtype = _e; } +} + +inline 
flatbuffers::Offset HashtableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _table_id = _o->table_id; + auto _key_dtype = _o->key_dtype; + auto _value_dtype = _o->value_dtype; + return tflite::CreateHashtableOptions( + _fbb, + _table_id, + _key_dtype, + _value_dtype); +} + +inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new HashtableFindOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HashtableFindOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableFindOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableFindOptions( + _fbb); +} + +inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new HashtableImportOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HashtableImportOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableImportOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableImportOptions( + _fbb); +} + +inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = new HashtableSizeOptionsT(); + UnPackTo(_o, _resolver); + return _o; +} + +inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HashtableSizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return 
CreateHashtableSizeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableSizeOptions( + _fbb); +} + inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = new OperatorCodeT(); UnPackTo(_o, _resolver); @@ -15125,6 +15891,30 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob auto ptr = reinterpret_cast(obj); return verifier.VerifyTable(ptr); } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } default: return true; } } @@ -15559,6 +16349,30 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c auto ptr = reinterpret_cast(obj); return ptr->UnPack(resolver); } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } default: return nullptr; } } @@ -15981,6 +16795,30 @@ inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff auto ptr = reinterpret_cast(value); return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(value); + return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(value); + return CreateConv3DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableFindOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableImportOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableSizeOptions(_fbb, 
ptr, _rehasher).Union(); + } default: return 0; } } @@ -16403,6 +17241,30 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL value = new tflite::BroadcastToOptionsT(*reinterpret_cast(u.value)); break; } + case BuiltinOptions_Rfft2dOptions: { + value = new tflite::Rfft2dOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_Conv3DOptions: { + value = new tflite::Conv3DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableOptions: { + value = new tflite::HashtableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableFindOptions: { + value = new tflite::HashtableFindOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableImportOptions: { + value = new tflite::HashtableImportOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableSizeOptions: { + value = new tflite::HashtableSizeOptionsT(*reinterpret_cast(u.value)); + break; + } default: break; } @@ -16930,6 +17792,36 @@ inline void BuiltinOptionsUnion::Reset() { delete ptr; break; } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } default: break; } value = nullptr; diff --git a/code/components/tfmicro/tensorflow/lite/schema/schema_utils.h b/code/components/tfmicro/tensorflow/lite/schema/schema_utils.h index 453276b9..9cca36c7 100644 --- a/code/components/tfmicro/tensorflow/lite/schema/schema_utils.h +++ b/code/components/tfmicro/tensorflow/lite/schema/schema_utils.h @@ -21,7 +21,7 @@ limitations under the License. namespace tflite { // The following methods are introduced to resolve op builtin code shortage -// problem. The new builtin opreator will be assigned to the extended builtin +// problem. The new builtin operator will be assigned to the extended builtin // code field in the flatbuffer schema. Those methods helps to hide builtin code // details. BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); diff --git a/code/components/tfmicro/tensorflow/lite/version.h b/code/components/tfmicro/tensorflow/lite/version.h deleted file mode 100644 index f667447b..00000000 --- a/code/components/tfmicro/tensorflow/lite/version.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_VERSION_H_ -#define TENSORFLOW_LITE_VERSION_H_ - -#include "tensorflow/core/public/version.h" - -// The version number of the Schema. Ideally all changes will be backward -// compatible. If that ever changes, we must ensure that version is the first -// entry in the new tflite root so that we can see that version is not 1. -#define TFLITE_SCHEMA_VERSION (3) - -// TensorFlow Lite Runtime version. -// This value is currently shared with that of TensorFlow. -#define TFLITE_VERSION_STRING TF_VERSION_STRING - -#endif // TENSORFLOW_LITE_VERSION_H_ diff --git a/code/components/tfmicro/third_party/flatbuffers/include/flatbuffers/flexbuffers.h b/code/components/tfmicro/third_party/flatbuffers/include/flatbuffers/flexbuffers.h new file mode 100644 index 00000000..a4401f8b --- /dev/null +++ b/code/components/tfmicro/third_party/flatbuffers/include/flatbuffers/flexbuffers.h @@ -0,0 +1,1631 @@ +/* + * Copyright 2017 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_FLEXBUFFERS_H_ +#define FLATBUFFERS_FLEXBUFFERS_H_ + +#include +// Used to select STL variant. +#include "flatbuffers/base.h" +// We use the basic binary writing functions from the regular FlatBuffers. +#include "flatbuffers/util.h" + +#ifdef _MSC_VER +# include +#endif + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4127) // C4127: conditional expression is constant +#endif + +namespace flexbuffers { + +class Reference; +class Map; + +// These are used in the lower 2 bits of a type field to determine the size of +// the elements (and or size field) of the item pointed to (e.g. vector). +enum BitWidth { + BIT_WIDTH_8 = 0, + BIT_WIDTH_16 = 1, + BIT_WIDTH_32 = 2, + BIT_WIDTH_64 = 3, +}; + +// These are used as the upper 6 bits of a type field to indicate the actual +// type. +enum Type { + FBT_NULL = 0, + FBT_INT = 1, + FBT_UINT = 2, + FBT_FLOAT = 3, + // Types above stored inline, types below store an offset. + FBT_KEY = 4, + FBT_STRING = 5, + FBT_INDIRECT_INT = 6, + FBT_INDIRECT_UINT = 7, + FBT_INDIRECT_FLOAT = 8, + FBT_MAP = 9, + FBT_VECTOR = 10, // Untyped. + FBT_VECTOR_INT = 11, // Typed any size (stores no type table). + FBT_VECTOR_UINT = 12, + FBT_VECTOR_FLOAT = 13, + FBT_VECTOR_KEY = 14, + // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead. + // Read test.cpp/FlexBuffersDeprecatedTest() for details on why. + FBT_VECTOR_STRING_DEPRECATED = 15, + FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field). + FBT_VECTOR_UINT2 = 17, + FBT_VECTOR_FLOAT2 = 18, + FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field). + FBT_VECTOR_UINT3 = 20, + FBT_VECTOR_FLOAT3 = 21, + FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field). 
+ FBT_VECTOR_UINT4 = 23, + FBT_VECTOR_FLOAT4 = 24, + FBT_BLOB = 25, + FBT_BOOL = 26, + FBT_VECTOR_BOOL = + 36, // To Allow the same type of conversion of type to vector type +}; + +inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; } + +inline bool IsTypedVectorElementType(Type t) { + return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL; +} + +inline bool IsTypedVector(Type t) { + return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) || + t == FBT_VECTOR_BOOL; +} + +inline bool IsFixedTypedVector(Type t) { + return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; +} + +inline Type ToTypedVector(Type t, size_t fixed_len = 0) { + FLATBUFFERS_ASSERT(IsTypedVectorElementType(t)); + switch (fixed_len) { + case 0: return static_cast(t - FBT_INT + FBT_VECTOR_INT); + case 2: return static_cast(t - FBT_INT + FBT_VECTOR_INT2); + case 3: return static_cast(t - FBT_INT + FBT_VECTOR_INT3); + case 4: return static_cast(t - FBT_INT + FBT_VECTOR_INT4); + default: FLATBUFFERS_ASSERT(0); return FBT_NULL; + } +} + +inline Type ToTypedVectorElementType(Type t) { + FLATBUFFERS_ASSERT(IsTypedVector(t)); + return static_cast(t - FBT_VECTOR_INT + FBT_INT); +} + +inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) { + FLATBUFFERS_ASSERT(IsFixedTypedVector(t)); + auto fixed_type = t - FBT_VECTOR_INT2; + *len = static_cast(fixed_type / 3 + + 2); // 3 types each, starting from length 2. + return static_cast(fixed_type % 3 + FBT_INT); +} + +// TODO: implement proper support for 8/16bit floats, or decide not to +// support them. +typedef int16_t half; +typedef int8_t quarter; + +// TODO: can we do this without conditionals using intrinsics or inline asm +// on some platforms? Given branch prediction the method below should be +// decently quick, but it is the most frequently executed function. +// We could do an (unaligned) 64-bit read if we ifdef out the platforms for +// which that doesn't work (or where we'd read into un-owned memory). +template +R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) { + return byte_width < 4 + ? (byte_width < 2 + ? static_cast(flatbuffers::ReadScalar(data)) + : static_cast(flatbuffers::ReadScalar(data))) + : (byte_width < 8 + ? static_cast(flatbuffers::ReadScalar(data)) + : static_cast(flatbuffers::ReadScalar(data))); +} + +inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) { + return ReadSizedScalar( + data, byte_width); +} + +inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) { + // This is the "hottest" function (all offset lookups use this), so worth + // optimizing if possible. + // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a + // constant, which here it isn't. Test if memcpy is still faster than + // the conditionals in ReadSizedScalar. Can also use inline asm. 
+ // clang-format off + #if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86) + uint64_t u = 0; + __movsb(reinterpret_cast(&u), + reinterpret_cast(data), byte_width); + return flatbuffers::EndianScalar(u); + #else + return ReadSizedScalar( + data, byte_width); + #endif + // clang-format on +} + +inline double ReadDouble(const uint8_t *data, uint8_t byte_width) { + return ReadSizedScalar(data, + byte_width); +} + +inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) { + return offset - ReadUInt64(offset, byte_width); +} + +template const uint8_t *Indirect(const uint8_t *offset) { + return offset - flatbuffers::ReadScalar(offset); +} + +inline BitWidth WidthU(uint64_t u) { +#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \ + { \ + if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \ + } + FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8); + FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16); + FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32); +#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH + return BIT_WIDTH_64; +} + +inline BitWidth WidthI(int64_t i) { + auto u = static_cast(i) << 1; + return WidthU(i >= 0 ? u : ~u); +} + +inline BitWidth WidthF(double f) { + return static_cast(static_cast(f)) == f ? BIT_WIDTH_32 + : BIT_WIDTH_64; +} + +// Base class of all types below. +// Points into the data buffer and allows access to one type. +class Object { + public: + Object(const uint8_t *data, uint8_t byte_width) + : data_(data), byte_width_(byte_width) {} + + protected: + const uint8_t *data_; + uint8_t byte_width_; +}; + +// Object that has a size, obtained either from size prefix, or elsewhere. +class Sized : public Object { + public: + // Size prefix. + Sized(const uint8_t *data, uint8_t byte_width) + : Object(data, byte_width), size_(read_size()) {} + // Manual size. + Sized(const uint8_t *data, uint8_t byte_width, size_t sz) + : Object(data, byte_width), size_(sz) {} + size_t size() const { return size_; } + // Access size stored in `byte_width_` bytes before data_ pointer. + size_t read_size() const { + return static_cast(ReadUInt64(data_ - byte_width_, byte_width_)); + } + + protected: + size_t size_; +}; + +class String : public Sized { + public: + // Size prefix. + String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} + // Manual size. 
+ String(const uint8_t *data, uint8_t byte_width, size_t sz) + : Sized(data, byte_width, sz) {} + + size_t length() const { return size(); } + const char *c_str() const { return reinterpret_cast(data_); } + std::string str() const { return std::string(c_str(), size()); } + + static String EmptyString() { + static const char *empty_string = ""; + return String(reinterpret_cast(empty_string), 1, 0); + } + bool IsTheEmptyString() const { return data_ == EmptyString().data_; } +}; + +class Blob : public Sized { + public: + Blob(const uint8_t *data_buf, uint8_t byte_width) + : Sized(data_buf, byte_width) {} + + static Blob EmptyBlob() { + static const uint8_t empty_blob[] = { 0 /*len*/ }; + return Blob(empty_blob + 1, 1); + } + bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; } + const uint8_t *data() const { return data_; } +}; + +class Vector : public Sized { + public: + Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} + + Reference operator[](size_t i) const; + + static Vector EmptyVector() { + static const uint8_t empty_vector[] = { 0 /*len*/ }; + return Vector(empty_vector + 1, 1); + } + bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; } +}; + +class TypedVector : public Sized { + public: + TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type) + : Sized(data, byte_width), type_(element_type) {} + + Reference operator[](size_t i) const; + + static TypedVector EmptyTypedVector() { + static const uint8_t empty_typed_vector[] = { 0 /*len*/ }; + return TypedVector(empty_typed_vector + 1, 1, FBT_INT); + } + bool IsTheEmptyVector() const { + return data_ == TypedVector::EmptyTypedVector().data_; + } + + Type ElementType() { return type_; } + + friend Reference; + + private: + Type type_; + + friend Map; +}; + +class FixedTypedVector : public Object { + public: + FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, + uint8_t len) + : Object(data, byte_width), type_(element_type), len_(len) {} + + Reference operator[](size_t i) const; + + static FixedTypedVector EmptyFixedTypedVector() { + static const uint8_t fixed_empty_vector[] = { 0 /* unused */ }; + return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0); + } + bool IsTheEmptyFixedTypedVector() const { + return data_ == FixedTypedVector::EmptyFixedTypedVector().data_; + } + + Type ElementType() { return type_; } + uint8_t size() { return len_; } + + private: + Type type_; + uint8_t len_; +}; + +class Map : public Vector { + public: + Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {} + + Reference operator[](const char *key) const; + Reference operator[](const std::string &key) const; + + Vector Values() const { return Vector(data_, byte_width_); } + + TypedVector Keys() const { + const size_t num_prefixed_fields = 3; + auto keys_offset = data_ - byte_width_ * num_prefixed_fields; + return TypedVector(Indirect(keys_offset, byte_width_), + static_cast( + ReadUInt64(keys_offset + byte_width_, byte_width_)), + FBT_KEY); + } + + static Map EmptyMap() { + static const uint8_t empty_map[] = { + 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/ + }; + return Map(empty_map + 4, 1); + } + + bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; } +}; + +template +void AppendToString(std::string &s, T &&v, bool keys_quoted) { + s += "[ "; + for (size_t i = 0; i < v.size(); i++) { + if (i) s += ", "; + v[i].ToString(true, keys_quoted, s); + } + s += " ]"; +} + +class Reference { + public: + Reference() 
+ : data_(nullptr), + parent_width_(0), + byte_width_(BIT_WIDTH_8), + type_(FBT_NULL) {} + + Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, + Type type) + : data_(data), + parent_width_(parent_width), + byte_width_(byte_width), + type_(type) {} + + Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type) + : data_(data), parent_width_(parent_width) { + byte_width_ = 1U << static_cast(packed_type & 3); + type_ = static_cast(packed_type >> 2); + } + + Type GetType() const { return type_; } + + bool IsNull() const { return type_ == FBT_NULL; } + bool IsBool() const { return type_ == FBT_BOOL; } + bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; } + bool IsUInt() const { + return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; + } + bool IsIntOrUint() const { return IsInt() || IsUInt(); } + bool IsFloat() const { + return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; + } + bool IsNumeric() const { return IsIntOrUint() || IsFloat(); } + bool IsString() const { return type_ == FBT_STRING; } + bool IsKey() const { return type_ == FBT_KEY; } + bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; } + bool IsUntypedVector() const { return type_ == FBT_VECTOR; } + bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); } + bool IsFixedTypedVector() const { + return flexbuffers::IsFixedTypedVector(type_); + } + bool IsAnyVector() const { + return (IsTypedVector() || IsFixedTypedVector() || IsVector()); + } + bool IsMap() const { return type_ == FBT_MAP; } + bool IsBlob() const { return type_ == FBT_BLOB; } + bool AsBool() const { + return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) + : AsUInt64()) != 0; + } + + // Reads any type as a int64_t. Never fails, does most sensible conversion. + // Truncates floats, strings are attempted to be parsed for a number, + // vectors/maps return their size. Returns 0 if all else fails. + int64_t AsInt64() const { + if (type_ == FBT_INT) { + // A fast path for the common case. + return ReadInt64(data_, parent_width_); + } else + switch (type_) { + case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); + case FBT_UINT: return ReadUInt64(data_, parent_width_); + case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); + case FBT_FLOAT: + return static_cast(ReadDouble(data_, parent_width_)); + case FBT_INDIRECT_FLOAT: + return static_cast(ReadDouble(Indirect(), byte_width_)); + case FBT_NULL: return 0; + case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str()); + case FBT_VECTOR: return static_cast(AsVector().size()); + case FBT_BOOL: return ReadInt64(data_, parent_width_); + default: + // Convert other things to int. + return 0; + } + } + + // TODO: could specialize these to not use AsInt64() if that saves + // extension ops in generated code, and use a faster op than ReadInt64. + int32_t AsInt32() const { return static_cast(AsInt64()); } + int16_t AsInt16() const { return static_cast(AsInt64()); } + int8_t AsInt8() const { return static_cast(AsInt64()); } + + uint64_t AsUInt64() const { + if (type_ == FBT_UINT) { + // A fast path for the common case. 
+ return ReadUInt64(data_, parent_width_); + } else + switch (type_) { + case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); + case FBT_INT: return ReadInt64(data_, parent_width_); + case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); + case FBT_FLOAT: + return static_cast(ReadDouble(data_, parent_width_)); + case FBT_INDIRECT_FLOAT: + return static_cast(ReadDouble(Indirect(), byte_width_)); + case FBT_NULL: return 0; + case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str()); + case FBT_VECTOR: return static_cast(AsVector().size()); + case FBT_BOOL: return ReadUInt64(data_, parent_width_); + default: + // Convert other things to uint. + return 0; + } + } + + uint32_t AsUInt32() const { return static_cast(AsUInt64()); } + uint16_t AsUInt16() const { return static_cast(AsUInt64()); } + uint8_t AsUInt8() const { return static_cast(AsUInt64()); } + + double AsDouble() const { + if (type_ == FBT_FLOAT) { + // A fast path for the common case. + return ReadDouble(data_, parent_width_); + } else + switch (type_) { + case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_); + case FBT_INT: + return static_cast(ReadInt64(data_, parent_width_)); + case FBT_UINT: + return static_cast(ReadUInt64(data_, parent_width_)); + case FBT_INDIRECT_INT: + return static_cast(ReadInt64(Indirect(), byte_width_)); + case FBT_INDIRECT_UINT: + return static_cast(ReadUInt64(Indirect(), byte_width_)); + case FBT_NULL: return 0.0; + case FBT_STRING: { +#if 1 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnull-dereference" + // TODO(b/173239141): Patched via micro/tools/make/flexbuffers_download.sh + // Introduce a segfault for an unsupported code path for TFLM. + return *(static_cast(nullptr)); +#pragma GCC diagnostic pop +#else + // This is the original code + double d; + flatbuffers::StringToNumber(AsString().c_str(), &d); + return d; +#endif + } + case FBT_VECTOR: return static_cast(AsVector().size()); + case FBT_BOOL: + return static_cast(ReadUInt64(data_, parent_width_)); + default: + // Convert strings and other things to float. + return 0; + } + } + + float AsFloat() const { return static_cast(AsDouble()); } + + const char *AsKey() const { + if (type_ == FBT_KEY || type_ == FBT_STRING) { + return reinterpret_cast(Indirect()); + } else { + return ""; + } + } + + // This function returns the empty string if you try to read something that + // is not a string or key. + String AsString() const { + if (type_ == FBT_STRING) { + return String(Indirect(), byte_width_); + } else if (type_ == FBT_KEY) { + auto key = Indirect(); + return String(key, byte_width_, + strlen(reinterpret_cast(key))); + } else { + return String::EmptyString(); + } + } + + // Unlike AsString(), this will convert any type to a std::string. + std::string ToString() const { + std::string s; + ToString(false, false, s); + return s; + } + + // Convert any type to a JSON-like string. strings_quoted determines if + // string values at the top level receive "" quotes (inside other values + // they always do). keys_quoted determines if keys are quoted, at any level. + // TODO(wvo): add further options to have indentation/newlines. 
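  // Illustrative sketch (hypothetical data, not from a real buffer): for a
  // map built as { "bar": "baz", "foo": 13 }, ToString(false, false, s)
  // appends
  //   { bar: "baz", foo: 13 }
  // and with keys_quoted == true it appends
  //   { "bar": "baz", "foo": 13 }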
+ void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const { + if (type_ == FBT_STRING) { + String str(Indirect(), byte_width_); + if (strings_quoted) { + flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false); + } else { + s.append(str.c_str(), str.length()); + } + } else if (IsKey()) { + auto str = AsKey(); + if (keys_quoted) { + flatbuffers::EscapeString(str, strlen(str), &s, true, false); + } else { + s += str; + } + } else if (IsInt()) { + s += flatbuffers::NumToString(AsInt64()); + } else if (IsUInt()) { + s += flatbuffers::NumToString(AsUInt64()); + } else if (IsFloat()) { + s += flatbuffers::NumToString(AsDouble()); + } else if (IsNull()) { + s += "null"; + } else if (IsBool()) { + s += AsBool() ? "true" : "false"; + } else if (IsMap()) { + s += "{ "; + auto m = AsMap(); + auto keys = m.Keys(); + auto vals = m.Values(); + for (size_t i = 0; i < keys.size(); i++) { + keys[i].ToString(true, keys_quoted, s); + s += ": "; + vals[i].ToString(true, keys_quoted, s); + if (i < keys.size() - 1) s += ", "; + } + s += " }"; + } else if (IsVector()) { + AppendToString(s, AsVector(), keys_quoted); + } else if (IsTypedVector()) { + AppendToString(s, AsTypedVector(), keys_quoted); + } else if (IsFixedTypedVector()) { + AppendToString(s, AsFixedTypedVector(), keys_quoted); + } else if (IsBlob()) { + auto blob = AsBlob(); + flatbuffers::EscapeString(reinterpret_cast(blob.data()), + blob.size(), &s, true, false); + } else { + s += "(?)"; + } + } + + // This function returns the empty blob if you try to read a not-blob. + // Strings can be viewed as blobs too. + Blob AsBlob() const { + if (type_ == FBT_BLOB || type_ == FBT_STRING) { + return Blob(Indirect(), byte_width_); + } else { + return Blob::EmptyBlob(); + } + } + + // This function returns the empty vector if you try to read a not-vector. + // Maps can be viewed as vectors too. + Vector AsVector() const { + if (type_ == FBT_VECTOR || type_ == FBT_MAP) { + return Vector(Indirect(), byte_width_); + } else { + return Vector::EmptyVector(); + } + } + + TypedVector AsTypedVector() const { + if (IsTypedVector()) { + auto tv = + TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_)); + if (tv.type_ == FBT_STRING) { + // These can't be accessed as strings, since we don't know the bit-width + // of the size field, see the declaration of + // FBT_VECTOR_STRING_DEPRECATED above for details. + // We change the type here to be keys, which are a subtype of strings, + // and will ignore the size field. This will truncate strings with + // embedded nulls. + tv.type_ = FBT_KEY; + } + return tv; + } else { + return TypedVector::EmptyTypedVector(); + } + } + + FixedTypedVector AsFixedTypedVector() const { + if (IsFixedTypedVector()) { + uint8_t len = 0; + auto vtype = ToFixedTypedVectorElementType(type_, &len); + return FixedTypedVector(Indirect(), byte_width_, vtype, len); + } else { + return FixedTypedVector::EmptyFixedTypedVector(); + } + } + + Map AsMap() const { + if (type_ == FBT_MAP) { + return Map(Indirect(), byte_width_); + } else { + return Map::EmptyMap(); + } + } + + template T As() const; + + // Experimental: Mutation functions. + // These allow scalars in an already created buffer to be updated in-place. + // Since by default scalars are stored in the smallest possible space, + // the new value may not fit, in which case these functions return false. + // To avoid this, you can construct the values you intend to mutate using + // Builder::ForceMinimumBitWidth. 
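  // A minimal sketch (illustrative only; uses the Builder and GetRoot that
  // are declared further down in this header):
  //   flexbuffers::Builder fbb;
  //   fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_64);
  //   fbb.Vector([&]() { fbb.Int(1); });
  //   fbb.Finish();
  //   std::vector<uint8_t> buf = fbb.GetBuffer();  // mutable copy
  //   // Returns true: the element was stored 64 bits wide, so the new value fits.
  //   flexbuffers::GetRoot(buf).AsVector()[0].MutateInt(1LL << 40);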
+ bool MutateInt(int64_t i) { + if (type_ == FBT_INT) { + return Mutate(data_, i, parent_width_, WidthI(i)); + } else if (type_ == FBT_INDIRECT_INT) { + return Mutate(Indirect(), i, byte_width_, WidthI(i)); + } else if (type_ == FBT_UINT) { + auto u = static_cast(i); + return Mutate(data_, u, parent_width_, WidthU(u)); + } else if (type_ == FBT_INDIRECT_UINT) { + auto u = static_cast(i); + return Mutate(Indirect(), u, byte_width_, WidthU(u)); + } else { + return false; + } + } + + bool MutateBool(bool b) { + return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8); + } + + bool MutateUInt(uint64_t u) { + if (type_ == FBT_UINT) { + return Mutate(data_, u, parent_width_, WidthU(u)); + } else if (type_ == FBT_INDIRECT_UINT) { + return Mutate(Indirect(), u, byte_width_, WidthU(u)); + } else if (type_ == FBT_INT) { + auto i = static_cast(u); + return Mutate(data_, i, parent_width_, WidthI(i)); + } else if (type_ == FBT_INDIRECT_INT) { + auto i = static_cast(u); + return Mutate(Indirect(), i, byte_width_, WidthI(i)); + } else { + return false; + } + } + + bool MutateFloat(float f) { + if (type_ == FBT_FLOAT) { + return MutateF(data_, f, parent_width_, BIT_WIDTH_32); + } else if (type_ == FBT_INDIRECT_FLOAT) { + return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32); + } else { + return false; + } + } + + bool MutateFloat(double d) { + if (type_ == FBT_FLOAT) { + return MutateF(data_, d, parent_width_, WidthF(d)); + } else if (type_ == FBT_INDIRECT_FLOAT) { + return MutateF(Indirect(), d, byte_width_, WidthF(d)); + } else { + return false; + } + } + + bool MutateString(const char *str, size_t len) { + auto s = AsString(); + if (s.IsTheEmptyString()) return false; + // This is very strict, could allow shorter strings, but that creates + // garbage. + if (s.length() != len) return false; + memcpy(const_cast(s.c_str()), str, len); + return true; + } + bool MutateString(const char *str) { return MutateString(str, strlen(str)); } + bool MutateString(const std::string &str) { + return MutateString(str.data(), str.length()); + } + + private: + const uint8_t *Indirect() const { + return flexbuffers::Indirect(data_, parent_width_); + } + + template + bool Mutate(const uint8_t *dest, T t, size_t byte_width, + BitWidth value_width) { + auto fits = static_cast(static_cast(1U) << value_width) <= + byte_width; + if (fits) { + t = flatbuffers::EndianScalar(t); + memcpy(const_cast(dest), &t, byte_width); + } + return fits; + } + + template + bool MutateF(const uint8_t *dest, T t, size_t byte_width, + BitWidth value_width) { + if (byte_width == sizeof(double)) + return Mutate(dest, static_cast(t), byte_width, value_width); + if (byte_width == sizeof(float)) + return Mutate(dest, static_cast(t), byte_width, value_width); + FLATBUFFERS_ASSERT(false); + return false; + } + + const uint8_t *data_; + uint8_t parent_width_; + uint8_t byte_width_; + Type type_; +}; + +// Template specialization for As(). 
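// For example (illustrative, assuming buf/len point at a finished buffer):
//   auto x = flexbuffers::GetRoot(buf, len).AsMap()["x"].As<int32_t>();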
+template<> inline bool Reference::As() const { return AsBool(); } + +template<> inline int8_t Reference::As() const { return AsInt8(); } +template<> inline int16_t Reference::As() const { return AsInt16(); } +template<> inline int32_t Reference::As() const { return AsInt32(); } +template<> inline int64_t Reference::As() const { return AsInt64(); } + +template<> inline uint8_t Reference::As() const { return AsUInt8(); } +template<> inline uint16_t Reference::As() const { + return AsUInt16(); +} +template<> inline uint32_t Reference::As() const { + return AsUInt32(); +} +template<> inline uint64_t Reference::As() const { + return AsUInt64(); +} + +template<> inline double Reference::As() const { return AsDouble(); } +template<> inline float Reference::As() const { return AsFloat(); } + +template<> inline String Reference::As() const { return AsString(); } +template<> inline std::string Reference::As() const { + return AsString().str(); +} + +template<> inline Blob Reference::As() const { return AsBlob(); } +template<> inline Vector Reference::As() const { return AsVector(); } +template<> inline TypedVector Reference::As() const { + return AsTypedVector(); +} +template<> inline FixedTypedVector Reference::As() const { + return AsFixedTypedVector(); +} +template<> inline Map Reference::As() const { return AsMap(); } + +inline uint8_t PackedType(BitWidth bit_width, Type type) { + return static_cast(bit_width | (type << 2)); +} + +inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); } + +// Vector accessors. +// Note: if you try to access outside of bounds, you get a Null value back +// instead. Normally this would be an assert, but since this is "dynamically +// typed" data, you may not want that (someone sends you a 2d vector and you +// wanted 3d). +// The Null converts seamlessly into a default value for any other type. +// TODO(wvo): Could introduce an #ifdef that makes this into an assert? +inline Reference Vector::operator[](size_t i) const { + auto len = size(); + if (i >= len) return Reference(nullptr, 1, NullPackedType()); + auto packed_type = (data_ + len * byte_width_)[i]; + auto elem = data_ + i * byte_width_; + return Reference(elem, byte_width_, packed_type); +} + +inline Reference TypedVector::operator[](size_t i) const { + auto len = size(); + if (i >= len) return Reference(nullptr, 1, NullPackedType()); + auto elem = data_ + i * byte_width_; + return Reference(elem, byte_width_, 1, type_); +} + +inline Reference FixedTypedVector::operator[](size_t i) const { + if (i >= len_) return Reference(nullptr, 1, NullPackedType()); + auto elem = data_ + i * byte_width_; + return Reference(elem, byte_width_, 1, type_); +} + +template int KeyCompare(const void *key, const void *elem) { + auto str_elem = reinterpret_cast( + Indirect(reinterpret_cast(elem))); + auto skey = reinterpret_cast(key); + return strcmp(skey, str_elem); +} + +inline Reference Map::operator[](const char *key) const { + auto keys = Keys(); + // We can't pass keys.byte_width_ to the comparison function, so we have + // to pick the right one ahead of time. 
+ int (*comp)(const void *, const void *) = nullptr; + switch (keys.byte_width_) { + case 1: comp = KeyCompare; break; + case 2: comp = KeyCompare; break; + case 4: comp = KeyCompare; break; + case 8: comp = KeyCompare; break; + } + auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp); + if (!res) return Reference(nullptr, 1, NullPackedType()); + auto i = (reinterpret_cast(res) - keys.data_) / keys.byte_width_; + return (*static_cast(this))[i]; +} + +inline Reference Map::operator[](const std::string &key) const { + return (*this)[key.c_str()]; +} + +inline Reference GetRoot(const uint8_t *buffer, size_t size) { + // See Finish() below for the serialization counterpart of this. + // The root starts at the end of the buffer, so we parse backwards from there. + auto end = buffer + size; + auto byte_width = *--end; + auto packed_type = *--end; + end -= byte_width; // The root data item. + return Reference(end, byte_width, packed_type); +} + +inline Reference GetRoot(const std::vector &buffer) { + return GetRoot(flatbuffers::vector_data(buffer), buffer.size()); +} + +// Flags that configure how the Builder behaves. +// The "Share" flags determine if the Builder automatically tries to pool +// this type. Pooling can reduce the size of serialized data if there are +// multiple maps of the same kind, at the expense of slightly slower +// serialization (the cost of lookups) and more memory use (std::set). +// By default this is on for keys, but off for strings. +// Turn keys off if you have e.g. only one map. +// Turn strings on if you expect many non-unique string values. +// Additionally, sharing key vectors can save space if you have maps with +// identical field populations. +enum BuilderFlag { + BUILDER_FLAG_NONE = 0, + BUILDER_FLAG_SHARE_KEYS = 1, + BUILDER_FLAG_SHARE_STRINGS = 2, + BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3, + BUILDER_FLAG_SHARE_KEY_VECTORS = 4, + BUILDER_FLAG_SHARE_ALL = 7, +}; + +class Builder FLATBUFFERS_FINAL_CLASS { + public: + Builder(size_t initial_size = 256, + BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS) + : buf_(initial_size), + finished_(false), + flags_(flags), + force_min_bit_width_(BIT_WIDTH_8), + key_pool(KeyOffsetCompare(buf_)), + string_pool(StringOffsetCompare(buf_)) { + buf_.clear(); + } + + /// @brief Get the serialized buffer (after you call `Finish()`). + /// @return Returns a vector owned by this class. + const std::vector &GetBuffer() const { + Finished(); + return buf_; + } + + // Size of the buffer. Does not include unfinished values. + size_t GetSize() const { return buf_.size(); } + + // Reset all state so we can re-use the buffer. + void Clear() { + buf_.clear(); + stack_.clear(); + finished_ = false; + // flags_ remains as-is; + force_min_bit_width_ = BIT_WIDTH_8; + key_pool.clear(); + string_pool.clear(); + } + + // All value constructing functions below have two versions: one that + // takes a key (for placement inside a map) and one that doesn't (for inside + // vectors and elsewhere). 
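  // For example (illustrative):
  //   fbb.Int(42);            // unkeyed: element of a vector
  //   fbb.Int("answer", 42);  // keyed: entry of a map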
+ + void Null() { stack_.push_back(Value()); } + void Null(const char *key) { + Key(key); + Null(); + } + + void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); } + void Int(const char *key, int64_t i) { + Key(key); + Int(i); + } + + void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); } + void UInt(const char *key, uint64_t u) { + Key(key); + UInt(u); + } + + void Float(float f) { stack_.push_back(Value(f)); } + void Float(const char *key, float f) { + Key(key); + Float(f); + } + + void Double(double f) { stack_.push_back(Value(f)); } + void Double(const char *key, double d) { + Key(key); + Double(d); + } + + void Bool(bool b) { stack_.push_back(Value(b)); } + void Bool(const char *key, bool b) { + Key(key); + Bool(b); + } + + void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); } + void IndirectInt(const char *key, int64_t i) { + Key(key); + IndirectInt(i); + } + + void IndirectUInt(uint64_t u) { + PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); + } + void IndirectUInt(const char *key, uint64_t u) { + Key(key); + IndirectUInt(u); + } + + void IndirectFloat(float f) { + PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); + } + void IndirectFloat(const char *key, float f) { + Key(key); + IndirectFloat(f); + } + + void IndirectDouble(double f) { + PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); + } + void IndirectDouble(const char *key, double d) { + Key(key); + IndirectDouble(d); + } + + size_t Key(const char *str, size_t len) { + auto sloc = buf_.size(); + WriteBytes(str, len + 1); + if (flags_ & BUILDER_FLAG_SHARE_KEYS) { + auto it = key_pool.find(sloc); + if (it != key_pool.end()) { + // Already in the buffer. Remove key we just serialized, and use + // existing offset instead. + buf_.resize(sloc); + sloc = *it; + } else { + key_pool.insert(sloc); + } + } + stack_.push_back(Value(static_cast(sloc), FBT_KEY, BIT_WIDTH_8)); + return sloc; + } + + size_t Key(const char *str) { return Key(str, strlen(str)); } + size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); } + + size_t String(const char *str, size_t len) { + auto reset_to = buf_.size(); + auto sloc = CreateBlob(str, len, 1, FBT_STRING); + if (flags_ & BUILDER_FLAG_SHARE_STRINGS) { + StringOffset so(sloc, len); + auto it = string_pool.find(so); + if (it != string_pool.end()) { + // Already in the buffer. Remove string we just serialized, and use + // existing offset instead. + buf_.resize(reset_to); + sloc = it->first; + stack_.back().u_ = sloc; + } else { + string_pool.insert(so); + } + } + return sloc; + } + size_t String(const char *str) { return String(str, strlen(str)); } + size_t String(const std::string &str) { + return String(str.c_str(), str.size()); + } + void String(const flexbuffers::String &str) { + String(str.c_str(), str.length()); + } + + void String(const char *key, const char *str) { + Key(key); + String(str); + } + void String(const char *key, const std::string &str) { + Key(key); + String(str); + } + void String(const char *key, const flexbuffers::String &str) { + Key(key); + String(str); + } + + size_t Blob(const void *data, size_t len) { + return CreateBlob(data, len, 0, FBT_BLOB); + } + size_t Blob(const std::vector &v) { + return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB); + } + + // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String), + // e.g. Vector etc. Also in overloaded versions. + // Also some FlatBuffers types? 
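  // Sketch of string pooling (illustrative; assumes the share-strings flag):
  //   flexbuffers::Builder fbb(512, flexbuffers::BUILDER_FLAG_SHARE_KEYS_AND_STRINGS);
  //   fbb.Vector([&]() {
  //     fbb.String("status");  // blob written once
  //     fbb.String("status");  // pooled: re-uses the first offset
  //   });
  //   fbb.Finish();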
+ + size_t StartVector() { return stack_.size(); } + size_t StartVector(const char *key) { + Key(key); + return stack_.size(); + } + size_t StartMap() { return stack_.size(); } + size_t StartMap(const char *key) { + Key(key); + return stack_.size(); + } + + // TODO(wvo): allow this to specify an aligment greater than the natural + // alignment. + size_t EndVector(size_t start, bool typed, bool fixed) { + auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed); + // Remove temp elements and return vector. + stack_.resize(start); + stack_.push_back(vec); + return static_cast(vec.u_); + } + + size_t EndMap(size_t start) { + // We should have interleaved keys and values on the stack. + // Make sure it is an even number: + auto len = stack_.size() - start; + FLATBUFFERS_ASSERT(!(len & 1)); + len /= 2; + // Make sure keys are all strings: + for (auto key = start; key < stack_.size(); key += 2) { + FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY); + } + // Now sort values, so later we can do a binary search lookup. + // We want to sort 2 array elements at a time. + struct TwoValue { + Value key; + Value val; + }; + // TODO(wvo): strict aliasing? + // TODO(wvo): allow the caller to indicate the data is already sorted + // for maximum efficiency? With an assert to check sortedness to make sure + // we're not breaking binary search. + // Or, we can track if the map is sorted as keys are added which would be + // be quite cheap (cheaper than checking it here), so we can skip this + // step automatically when appliccable, and encourage people to write in + // sorted fashion. + // std::sort is typically already a lot faster on sorted data though. + auto dict = + reinterpret_cast(flatbuffers::vector_data(stack_) + start); + std::sort(dict, dict + len, + [&](const TwoValue &a, const TwoValue &b) -> bool { + auto as = reinterpret_cast( + flatbuffers::vector_data(buf_) + a.key.u_); + auto bs = reinterpret_cast( + flatbuffers::vector_data(buf_) + b.key.u_); + auto comp = strcmp(as, bs); + // If this assertion hits, you've added two keys with the same + // value to this map. + // TODO: Have to check for pointer equality, as some sort + // implementation apparently call this function with the same + // element?? Why? + FLATBUFFERS_ASSERT(comp || &a == &b); + return comp < 0; + }); + // First create a vector out of all keys. + // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share + // the first vector. + auto keys = CreateVector(start, len, 2, true, false); + auto vec = CreateVector(start + 1, len, 2, false, false, &keys); + // Remove temp elements and return map. + stack_.resize(start); + stack_.push_back(vec); + return static_cast(vec.u_); + } + + template size_t Vector(F f) { + auto start = StartVector(); + f(); + return EndVector(start, false, false); + } + template size_t Vector(F f, T &state) { + auto start = StartVector(); + f(state); + return EndVector(start, false, false); + } + template size_t Vector(const char *key, F f) { + auto start = StartVector(key); + f(); + return EndVector(start, false, false); + } + template + size_t Vector(const char *key, F f, T &state) { + auto start = StartVector(key); + f(state); + return EndVector(start, false, false); + } + + template void Vector(const T *elems, size_t len) { + if (flatbuffers::is_scalar::value) { + // This path should be a lot quicker and use less space. 
+ ScalarVector(elems, len, false); + } else { + auto start = StartVector(); + for (size_t i = 0; i < len; i++) Add(elems[i]); + EndVector(start, false, false); + } + } + template + void Vector(const char *key, const T *elems, size_t len) { + Key(key); + Vector(elems, len); + } + template void Vector(const std::vector &vec) { + Vector(flatbuffers::vector_data(vec), vec.size()); + } + + template size_t TypedVector(F f) { + auto start = StartVector(); + f(); + return EndVector(start, true, false); + } + template size_t TypedVector(F f, T &state) { + auto start = StartVector(); + f(state); + return EndVector(start, true, false); + } + template size_t TypedVector(const char *key, F f) { + auto start = StartVector(key); + f(); + return EndVector(start, true, false); + } + template + size_t TypedVector(const char *key, F f, T &state) { + auto start = StartVector(key); + f(state); + return EndVector(start, true, false); + } + + template size_t FixedTypedVector(const T *elems, size_t len) { + // We only support a few fixed vector lengths. Anything bigger use a + // regular typed vector. + FLATBUFFERS_ASSERT(len >= 2 && len <= 4); + // And only scalar values. + static_assert(flatbuffers::is_scalar::value, "Unrelated types"); + return ScalarVector(elems, len, true); + } + + template + size_t FixedTypedVector(const char *key, const T *elems, size_t len) { + Key(key); + return FixedTypedVector(elems, len); + } + + template size_t Map(F f) { + auto start = StartMap(); + f(); + return EndMap(start); + } + template size_t Map(F f, T &state) { + auto start = StartMap(); + f(state); + return EndMap(start); + } + template size_t Map(const char *key, F f) { + auto start = StartMap(key); + f(); + return EndMap(start); + } + template size_t Map(const char *key, F f, T &state) { + auto start = StartMap(key); + f(state); + return EndMap(start); + } + template void Map(const std::map &map) { + auto start = StartMap(); + for (auto it = map.begin(); it != map.end(); ++it) + Add(it->first.c_str(), it->second); + EndMap(start); + } + + // If you wish to share a value explicitly (a value not shared automatically + // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these + // functions. Or if you wish to turn those flags off for performance reasons + // and still do some explicit sharing. For example: + // builder.IndirectDouble(M_PI); + // auto id = builder.LastValue(); // Remember where we stored it. + // .. more code goes here .. + // builder.ReuseValue(id); // Refers to same double by offset. + // LastValue works regardless of whether the value has a key or not. + // Works on any data type. + struct Value; + Value LastValue() { return stack_.back(); } + void ReuseValue(Value v) { stack_.push_back(v); } + void ReuseValue(const char *key, Value v) { + Key(key); + ReuseValue(v); + } + + // Overloaded Add that tries to call the correct function above. 
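  // Putting the builders above together (illustrative sketch):
  //   flexbuffers::Builder fbb;
  //   fbb.Map([&]() {
  //     fbb.Add("temperature", 21);  // dispatches to Int()
  //     fbb.Add("unit", "C");        // dispatches to String()
  //     fbb.TypedVector("readings", [&]() { fbb += 20; fbb += 21; fbb += 22; });
  //   });
  //   fbb.Finish();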
+ void Add(int8_t i) { Int(i); } + void Add(int16_t i) { Int(i); } + void Add(int32_t i) { Int(i); } + void Add(int64_t i) { Int(i); } + void Add(uint8_t u) { UInt(u); } + void Add(uint16_t u) { UInt(u); } + void Add(uint32_t u) { UInt(u); } + void Add(uint64_t u) { UInt(u); } + void Add(float f) { Float(f); } + void Add(double d) { Double(d); } + void Add(bool b) { Bool(b); } + void Add(const char *str) { String(str); } + void Add(const std::string &str) { String(str); } + void Add(const flexbuffers::String &str) { String(str); } + + template void Add(const std::vector &vec) { Vector(vec); } + + template void Add(const char *key, const T &t) { + Key(key); + Add(t); + } + + template void Add(const std::map &map) { + Map(map); + } + + template void operator+=(const T &t) { Add(t); } + + // This function is useful in combination with the Mutate* functions above. + // It forces elements of vectors and maps to have a minimum size, such that + // they can later be updated without failing. + // Call with no arguments to reset. + void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { + force_min_bit_width_ = bw; + } + + void Finish() { + // If you hit this assert, you likely have objects that were never included + // in a parent. You need to have exactly one root to finish a buffer. + // Check your Start/End calls are matched, and all objects are inside + // some other object. + FLATBUFFERS_ASSERT(stack_.size() == 1); + + // Write root value. + auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0)); + WriteAny(stack_[0], byte_width); + // Write root type. + Write(stack_[0].StoredPackedType(), 1); + // Write root size. Normally determined by parent, but root has no parent :) + Write(byte_width, 1); + + finished_ = true; + } + + private: + void Finished() const { + // If you get this assert, you're attempting to get access a buffer + // which hasn't been finished yet. Be sure to call + // Builder::Finish with your root object. + FLATBUFFERS_ASSERT(finished_); + } + + // Align to prepare for writing a scalar with a certain size. 
+ uint8_t Align(BitWidth alignment) { + auto byte_width = 1U << alignment; + buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), + 0); + return static_cast(byte_width); + } + + void WriteBytes(const void *val, size_t size) { + buf_.insert(buf_.end(), reinterpret_cast(val), + reinterpret_cast(val) + size); + } + + template void Write(T val, size_t byte_width) { + FLATBUFFERS_ASSERT(sizeof(T) >= byte_width); + val = flatbuffers::EndianScalar(val); + WriteBytes(&val, byte_width); + } + + void WriteDouble(double f, uint8_t byte_width) { + switch (byte_width) { + case 8: Write(f, byte_width); break; + case 4: Write(static_cast(f), byte_width); break; + // case 2: Write(static_cast(f), byte_width); break; + // case 1: Write(static_cast(f), byte_width); break; + default: FLATBUFFERS_ASSERT(0); + } + } + + void WriteOffset(uint64_t o, uint8_t byte_width) { + auto reloff = buf_.size() - o; + FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8)); + Write(reloff, byte_width); + } + + template void PushIndirect(T val, Type type, BitWidth bit_width) { + auto byte_width = Align(bit_width); + auto iloc = buf_.size(); + Write(val, byte_width); + stack_.push_back(Value(static_cast(iloc), type, bit_width)); + } + + static BitWidth WidthB(size_t byte_width) { + switch (byte_width) { + case 1: return BIT_WIDTH_8; + case 2: return BIT_WIDTH_16; + case 4: return BIT_WIDTH_32; + case 8: return BIT_WIDTH_64; + default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64; + } + } + + template static Type GetScalarType() { + static_assert(flatbuffers::is_scalar::value, "Unrelated types"); + return flatbuffers::is_floating_point::value + ? FBT_FLOAT + : flatbuffers::is_same::value + ? FBT_BOOL + : (flatbuffers::is_unsigned::value ? FBT_UINT + : FBT_INT); + } + + public: + // This was really intended to be private, except for LastValue/ReuseValue. + struct Value { + union { + int64_t i_; + uint64_t u_; + double f_; + }; + + Type type_; + + // For scalars: of itself, for vector: of its elements, for string: length. + BitWidth min_bit_width_; + + Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {} + + Value(bool b) + : u_(static_cast(b)), + type_(FBT_BOOL), + min_bit_width_(BIT_WIDTH_8) {} + + Value(int64_t i, Type t, BitWidth bw) + : i_(i), type_(t), min_bit_width_(bw) {} + Value(uint64_t u, Type t, BitWidth bw) + : u_(u), type_(t), min_bit_width_(bw) {} + + Value(float f) + : f_(static_cast(f)), + type_(FBT_FLOAT), + min_bit_width_(BIT_WIDTH_32) {} + Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {} + + uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { + return PackedType(StoredWidth(parent_bit_width_), type_); + } + + BitWidth ElemWidth(size_t buf_size, size_t elem_index) const { + if (IsInline(type_)) { + return min_bit_width_; + } else { + // We have an absolute offset, but want to store a relative offset + // elem_index elements beyond the current buffer end. Since whether + // the relative offset fits in a certain byte_width depends on + // the size of the elements before it (and their alignment), we have + // to test for each size in turn. + for (size_t byte_width = 1; + byte_width <= sizeof(flatbuffers::largest_scalar_t); + byte_width *= 2) { + // Where are we going to write this offset? + auto offset_loc = buf_size + + flatbuffers::PaddingBytes(buf_size, byte_width) + + elem_index * byte_width; + // Compute relative offset. + auto offset = offset_loc - u_; + // Does it fit? 
+ auto bit_width = WidthU(offset); + if (static_cast(static_cast(1U) << bit_width) == + byte_width) + return bit_width; + } + FLATBUFFERS_ASSERT(false); // Must match one of the sizes above. + return BIT_WIDTH_64; + } + } + + BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { + if (IsInline(type_)) { + return (std::max)(min_bit_width_, parent_bit_width_); + } else { + return min_bit_width_; + } + } + }; + + private: + void WriteAny(const Value &val, uint8_t byte_width) { + switch (val.type_) { + case FBT_NULL: + case FBT_INT: Write(val.i_, byte_width); break; + case FBT_BOOL: + case FBT_UINT: Write(val.u_, byte_width); break; + case FBT_FLOAT: WriteDouble(val.f_, byte_width); break; + default: WriteOffset(val.u_, byte_width); break; + } + } + + size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) { + auto bit_width = WidthU(len); + auto byte_width = Align(bit_width); + Write(len, byte_width); + auto sloc = buf_.size(); + WriteBytes(data, len + trailing); + stack_.push_back(Value(static_cast(sloc), type, bit_width)); + return sloc; + } + + template + size_t ScalarVector(const T *elems, size_t len, bool fixed) { + auto vector_type = GetScalarType(); + auto byte_width = sizeof(T); + auto bit_width = WidthB(byte_width); + // If you get this assert, you're trying to write a vector with a size + // field that is bigger than the scalars you're trying to write (e.g. a + // byte vector > 255 elements). For such types, write a "blob" instead. + // TODO: instead of asserting, could write vector with larger elements + // instead, though that would be wasteful. + FLATBUFFERS_ASSERT(WidthU(len) <= bit_width); + Align(bit_width); + if (!fixed) Write(len, byte_width); + auto vloc = buf_.size(); + for (size_t i = 0; i < len; i++) Write(elems[i], byte_width); + stack_.push_back(Value(static_cast(vloc), + ToTypedVector(vector_type, fixed ? len : 0), + bit_width)); + return vloc; + } + + Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, + bool fixed, const Value *keys = nullptr) { + FLATBUFFERS_ASSERT( + !fixed || + typed); // typed=false, fixed=true combination is not supported. + // Figure out smallest bit width we can store this vector with. + auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len)); + auto prefix_elems = 1; + if (keys) { + // If this vector is part of a map, we will pre-fix an offset to the keys + // to this vector. + bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0)); + prefix_elems += 2; + } + Type vector_type = FBT_KEY; + // Check bit widths and types for all elements. + for (size_t i = start; i < stack_.size(); i += step) { + auto elem_width = + stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems); + bit_width = (std::max)(bit_width, elem_width); + if (typed) { + if (i == start) { + vector_type = stack_[i].type_; + } else { + // If you get this assert, you are writing a typed vector with + // elements that are not all the same type. + FLATBUFFERS_ASSERT(vector_type == stack_[i].type_); + } + } + } + // If you get this assert, your fixed types are not one of: + // Int / UInt / Float / Key. + FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type)); + auto byte_width = Align(bit_width); + // Write vector. First the keys width/offset if available, and size. + if (keys) { + WriteOffset(keys->u_, byte_width); + Write(1ULL << keys->min_bit_width_, byte_width); + } + if (!fixed) Write(vec_len, byte_width); + // Then the actual data. 
+ auto vloc = buf_.size(); + for (size_t i = start; i < stack_.size(); i += step) { + WriteAny(stack_[i], byte_width); + } + // Then the types. + if (!typed) { + for (size_t i = start; i < stack_.size(); i += step) { + buf_.push_back(stack_[i].StoredPackedType(bit_width)); + } + } + return Value(static_cast(vloc), + keys ? FBT_MAP + : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) + : FBT_VECTOR), + bit_width); + } + + // You shouldn't really be copying instances of this class. + Builder(const Builder &); + Builder &operator=(const Builder &); + + std::vector buf_; + std::vector stack_; + + bool finished_; + + BuilderFlag flags_; + + BitWidth force_min_bit_width_; + + struct KeyOffsetCompare { + explicit KeyOffsetCompare(const std::vector &buf) : buf_(&buf) {} + bool operator()(size_t a, size_t b) const { + auto stra = + reinterpret_cast(flatbuffers::vector_data(*buf_) + a); + auto strb = + reinterpret_cast(flatbuffers::vector_data(*buf_) + b); + return strcmp(stra, strb) < 0; + } + const std::vector *buf_; + }; + + typedef std::pair StringOffset; + struct StringOffsetCompare { + explicit StringOffsetCompare(const std::vector &buf) + : buf_(&buf) {} + bool operator()(const StringOffset &a, const StringOffset &b) const { + auto stra = reinterpret_cast( + flatbuffers::vector_data(*buf_) + a.first); + auto strb = reinterpret_cast( + flatbuffers::vector_data(*buf_) + b.first); + return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0; + } + const std::vector *buf_; + }; + + typedef std::set KeyOffsetMap; + typedef std::set StringOffsetMap; + + KeyOffsetMap key_pool; + StringOffsetMap string_pool; +}; + +} // namespace flexbuffers + +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + +#endif // FLATBUFFERS_FLEXBUFFERS_H_ diff --git a/code/components/tfmicro/third_party/flatbuffers/include/flatbuffers/util.h b/code/components/tfmicro/third_party/flatbuffers/include/flatbuffers/util.h new file mode 100644 index 00000000..2aafa482 --- /dev/null +++ b/code/components/tfmicro/third_party/flatbuffers/include/flatbuffers/util.h @@ -0,0 +1,692 @@ +/* + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef FLATBUFFERS_UTIL_H_ +#define FLATBUFFERS_UTIL_H_ + +#include + +#include "flatbuffers/base.h" +#include "flatbuffers/stl_emulation.h" + +#ifndef FLATBUFFERS_PREFER_PRINTF +# include +#else // FLATBUFFERS_PREFER_PRINTF +# include +# include +#endif // FLATBUFFERS_PREFER_PRINTF + +#include +#include + +namespace flatbuffers { + +// @locale-independent functions for ASCII characters set. + +// Fast checking that character lies in closed range: [a <= x <= b] +// using one compare (conditional branch) operator. +inline bool check_ascii_range(char x, char a, char b) { + FLATBUFFERS_ASSERT(a <= b); + // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`. + // The x, a, b will be promoted to int and subtracted without overflow. 
+ return static_cast(x - a) <= static_cast(b - a); +} + +// Case-insensitive isalpha +inline bool is_alpha(char c) { + // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). + return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF); +} + +// Check (case-insensitive) that `c` is equal to alpha. +inline bool is_alpha_char(char c, char alpha) { + FLATBUFFERS_ASSERT(is_alpha(alpha)); + // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). + return ((c & 0xDF) == (alpha & 0xDF)); +} + +// https://en.cppreference.com/w/cpp/string/byte/isxdigit +// isdigit and isxdigit are the only standard narrow character classification +// functions that are not affected by the currently installed C locale. although +// some implementations (e.g. Microsoft in 1252 codepage) may classify +// additional single-byte characters as digits. +inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); } + +inline bool is_xdigit(char c) { + // Replace by look-up table. + return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF); +} + +// Case-insensitive isalnum +inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); } + +inline char CharToUpper(char c) { + return static_cast(::toupper(static_cast(c))); +} + +inline char CharToLower(char c) { + return static_cast(::tolower(static_cast(c))); +} + +// @end-locale-independent functions for ASCII character set + +#ifdef FLATBUFFERS_PREFER_PRINTF +template size_t IntToDigitCount(T t) { + size_t digit_count = 0; + // Count the sign for negative numbers + if (t < 0) digit_count++; + // Count a single 0 left of the dot for fractional numbers + if (-1 < t && t < 1) digit_count++; + // Count digits until fractional part + T eps = std::numeric_limits::epsilon(); + while (t <= (-1 + eps) || (1 - eps) <= t) { + t /= 10; + digit_count++; + } + return digit_count; +} + +template size_t NumToStringWidth(T t, int precision = 0) { + size_t string_width = IntToDigitCount(t); + // Count the dot for floating point numbers + if (precision) string_width += (precision + 1); + return string_width; +} + +template +std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0) { + size_t string_width = NumToStringWidth(t, precision); + std::string s(string_width, 0x00); + // Allow snprintf to use std::string trailing null to detect buffer overflow + snprintf(const_cast(s.data()), (s.size() + 1), fmt, string_width, t); + return s; +} +#endif // FLATBUFFERS_PREFER_PRINTF + +// Convert an integer or floating point value to a string. +// In contrast to std::stringstream, "char" values are +// converted to a string of digits, and we don't use scientific notation. +template std::string NumToString(T t) { + // clang-format off + + #ifndef FLATBUFFERS_PREFER_PRINTF + std::stringstream ss; + ss << t; + return ss.str(); + #else // FLATBUFFERS_PREFER_PRINTF + auto v = static_cast(t); + return NumToStringImplWrapper(v, "%.*lld"); + #endif // FLATBUFFERS_PREFER_PRINTF + // clang-format on +} +// Avoid char types used as character data. 
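// For example, NumToString('A') yields "65" (the numeric value), not "A".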
+template<> inline std::string NumToString(signed char t) { + return NumToString(static_cast(t)); +} +template<> inline std::string NumToString(unsigned char t) { + return NumToString(static_cast(t)); +} +template<> inline std::string NumToString(char t) { + return NumToString(static_cast(t)); +} +#if defined(FLATBUFFERS_CPP98_STL) +template<> inline std::string NumToString(long long t) { + char buf[21]; // (log((1 << 63) - 1) / log(10)) + 2 + snprintf(buf, sizeof(buf), "%lld", t); + return std::string(buf); +} + +template<> +inline std::string NumToString(unsigned long long t) { + char buf[22]; // (log((1 << 63) - 1) / log(10)) + 1 + snprintf(buf, sizeof(buf), "%llu", t); + return std::string(buf); +} +#endif // defined(FLATBUFFERS_CPP98_STL) + +// Special versions for floats/doubles. +template std::string FloatToString(T t, int precision) { + // clang-format off + + #ifndef FLATBUFFERS_PREFER_PRINTF + // to_string() prints different numbers of digits for floats depending on + // platform and isn't available on Android, so we use stringstream + std::stringstream ss; + // Use std::fixed to suppress scientific notation. + ss << std::fixed; + // Default precision is 6, we want that to be higher for doubles. + ss << std::setprecision(precision); + ss << t; + auto s = ss.str(); + #else // FLATBUFFERS_PREFER_PRINTF + auto v = static_cast(t); + auto s = NumToStringImplWrapper(v, "%0.*f", precision); + #endif // FLATBUFFERS_PREFER_PRINTF + // clang-format on + // Sadly, std::fixed turns "1" into "1.00000", so here we undo that. + auto p = s.find_last_not_of('0'); + if (p != std::string::npos) { + // Strip trailing zeroes. If it is a whole number, keep one zero. + s.resize(p + (s[p] == '.' ? 2 : 1)); + } + return s; +} + +template<> inline std::string NumToString(double t) { + return FloatToString(t, 12); +} +template<> inline std::string NumToString(float t) { + return FloatToString(t, 6); +} + +// Convert an integer value to a hexadecimal string. +// The returned string length is always xdigits long, prefixed by 0 digits. +// For example, IntToStringHex(0x23, 8) returns the string "00000023". +inline std::string IntToStringHex(int i, int xdigits) { + FLATBUFFERS_ASSERT(i >= 0); + // clang-format off + + #ifndef FLATBUFFERS_PREFER_PRINTF + std::stringstream ss; + ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase + << i; + return ss.str(); + #else // FLATBUFFERS_PREFER_PRINTF + return NumToStringImplWrapper(i, "%.*X", xdigits); + #endif // FLATBUFFERS_PREFER_PRINTF + // clang-format on +} + +// clang-format off +// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}. 
+#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0) + class ClassicLocale { + #ifdef _MSC_VER + typedef _locale_t locale_type; + #else + typedef locale_t locale_type; // POSIX.1-2008 locale_t type + #endif + ClassicLocale(); + ~ClassicLocale(); + locale_type locale_; + static ClassicLocale instance_; + public: + static locale_type Get() { return instance_.locale_; } + }; + + #ifdef _MSC_VER + #define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get()) + #define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get()) + #define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get()) + #define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get()) + #else + #define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get()) + #define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get()) + #define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get()) + #define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get()) + #endif +#else + #define __strtod_impl(s, pe) strtod(s, pe) + #define __strtof_impl(s, pe) static_cast(strtod(s, pe)) + #ifdef _MSC_VER + #define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b) + #define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b) + #else + #define __strtoull_impl(s, pe, b) strtoull(s, pe, b) + #define __strtoll_impl(s, pe, b) strtoll(s, pe, b) + #endif +#endif + +inline void strtoval_impl(int64_t *val, const char *str, char **endptr, + int base) { + *val = __strtoll_impl(str, endptr, base); +} + +inline void strtoval_impl(uint64_t *val, const char *str, char **endptr, + int base) { + *val = __strtoull_impl(str, endptr, base); +} + +inline void strtoval_impl(double *val, const char *str, char **endptr) { + *val = __strtod_impl(str, endptr); +} + +// UBSAN: double to float is safe if numeric_limits::is_iec559 is true. +__supress_ubsan__("float-cast-overflow") +inline void strtoval_impl(float *val, const char *str, char **endptr) { + *val = __strtof_impl(str, endptr); +} +#undef __strtoull_impl +#undef __strtoll_impl +#undef __strtod_impl +#undef __strtof_impl +// clang-format on + +// Adaptor for strtoull()/strtoll(). +// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9), +// while strtoll with base=0 interprets first leading zero as octal prefix. +// In future, it is possible to add prefixed 0b0101. +// 1) Checks errno code for overflow condition (out of range). +// 2) If base <= 0, function try to detect base of number by prefix. +// +// Return value (like strtoull and strtoll, but reject partial result): +// - If successful, an integer value corresponding to the str is returned. +// - If full string conversion can't be performed, 0 is returned. +// - If the converted value falls out of range of corresponding return type, a +// range error occurs. In this case value MAX(T)/MIN(T) is returned. 
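// Resulting behaviour of the StringToNumber wrappers below (illustrative):
//   int8_t v;
//   StringToNumber("100", &v);  // true,  v == 100
//   StringToNumber("-09", &v);  // true,  v == -9   (leading zeros accepted)
//   StringToNumber("200", &v);  // false, v == 127  (clamped to MAX(int8_t))
//   StringToNumber("12x", &v);  // false, v == 0    (partial parse rejected)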
+template +inline bool StringToIntegerImpl(T *val, const char *const str, + const int base = 0, + const bool check_errno = true) { + // T is int64_t or uint64_T + FLATBUFFERS_ASSERT(str); + if (base <= 0) { + auto s = str; + while (*s && !is_digit(*s)) s++; + if (s[0] == '0' && is_alpha_char(s[1], 'X')) + return StringToIntegerImpl(val, str, 16, check_errno); + // if a prefix not match, try base=10 + return StringToIntegerImpl(val, str, 10, check_errno); + } else { + if (check_errno) errno = 0; // clear thread-local errno + auto endptr = str; + strtoval_impl(val, str, const_cast(&endptr), base); + if ((*endptr != '\0') || (endptr == str)) { + *val = 0; // erase partial result + return false; // invalid string + } + // errno is out-of-range, return MAX/MIN + if (check_errno && errno) return false; + return true; + } +} + +template +inline bool StringToFloatImpl(T *val, const char *const str) { + // Type T must be either float or double. + FLATBUFFERS_ASSERT(str && val); + auto end = str; + strtoval_impl(val, str, const_cast(&end)); + auto done = (end != str) && (*end == '\0'); + if (!done) *val = 0; // erase partial result + return done; +} + +// Convert a string to an instance of T. +// Return value (matched with StringToInteger64Impl and strtod): +// - If successful, a numeric value corresponding to the str is returned. +// - If full string conversion can't be performed, 0 is returned. +// - If the converted value falls out of range of corresponding return type, a +// range error occurs. In this case value MAX(T)/MIN(T) is returned. +template inline bool StringToNumber(const char *s, T *val) { + FLATBUFFERS_ASSERT(s && val); + int64_t i64; + // The errno check isn't needed, will return MAX/MIN on overflow. + if (StringToIntegerImpl(&i64, s, 0, false)) { + const int64_t max = (flatbuffers::numeric_limits::max)(); + const int64_t min = flatbuffers::numeric_limits::lowest(); + if (i64 > max) { + *val = static_cast(max); + return false; + } + if (i64 < min) { + // For unsigned types return max to distinguish from + // "no conversion can be performed" when 0 is returned. + *val = static_cast(flatbuffers::is_unsigned::value ? max : min); + return false; + } + *val = static_cast(i64); + return true; + } + *val = 0; + return false; +} + +template<> inline bool StringToNumber(const char *str, int64_t *val) { + return StringToIntegerImpl(val, str); +} + +template<> +inline bool StringToNumber(const char *str, uint64_t *val) { + if (!StringToIntegerImpl(val, str)) return false; + // The strtoull accepts negative numbers: + // If the minus sign was part of the input sequence, the numeric value + // calculated from the sequence of digits is negated as if by unary minus + // in the result type, which applies unsigned integer wraparound rules. + // Fix this behaviour (except -0). + if (*val) { + auto s = str; + while (*s && !is_digit(*s)) s++; + s = (s > str) ? (s - 1) : s; // step back to one symbol + if (*s == '-') { + // For unsigned types return the max to distinguish from + // "no conversion can be performed". + *val = (flatbuffers::numeric_limits::max)(); + return false; + } + } + return true; +} + +template<> inline bool StringToNumber(const char *s, float *val) { + return StringToFloatImpl(val, s); +} + +template<> inline bool StringToNumber(const char *s, double *val) { + return StringToFloatImpl(val, s); +} + +inline int64_t StringToInt(const char *s, int base = 10) { + int64_t val; + return StringToIntegerImpl(&val, s, base) ? 
val : 0; +} + +inline uint64_t StringToUInt(const char *s, int base = 10) { + uint64_t val; + return StringToIntegerImpl(&val, s, base) ? val : 0; +} + +typedef bool (*LoadFileFunction)(const char *filename, bool binary, + std::string *dest); +typedef bool (*FileExistsFunction)(const char *filename); + +LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function); + +FileExistsFunction SetFileExistsFunction( + FileExistsFunction file_exists_function); + +// Check if file "name" exists. +bool FileExists(const char *name); + +// Check if "name" exists and it is also a directory. +bool DirExists(const char *name); + +// Load file "name" into "buf" returning true if successful +// false otherwise. If "binary" is false data is read +// using ifstream's text mode, otherwise data is read with +// no transcoding. +bool LoadFile(const char *name, bool binary, std::string *buf); + +// Save data "buf" of length "len" bytes into a file +// "name" returning true if successful, false otherwise. +// If "binary" is false data is written using ifstream's +// text mode, otherwise data is written with no +// transcoding. +bool SaveFile(const char *name, const char *buf, size_t len, bool binary); + +// Save data "buf" into file "name" returning true if +// successful, false otherwise. If "binary" is false +// data is written using ifstream's text mode, otherwise +// data is written with no transcoding. +inline bool SaveFile(const char *name, const std::string &buf, bool binary) { + return SaveFile(name, buf.c_str(), buf.size(), binary); +} + +// Functionality for minimalistic portable path handling. + +// The functions below behave correctly regardless of whether posix ('/') or +// Windows ('/' or '\\') separators are used. + +// Any new separators inserted are always posix. +FLATBUFFERS_CONSTEXPR char kPathSeparator = '/'; + +// Returns the path with the extension, if any, removed. +std::string StripExtension(const std::string &filepath); + +// Returns the extension, if any. +std::string GetExtension(const std::string &filepath); + +// Return the last component of the path, after the last separator. +std::string StripPath(const std::string &filepath); + +// Strip the last component of the path + separator. +std::string StripFileName(const std::string &filepath); + +// Concatenates a path with a filename, regardless of whether the path +// ends in a separator or not. +std::string ConCatPathFileName(const std::string &path, + const std::string &filename); + +// Replaces any '\\' separators with '/' +std::string PosixPath(const char *path); + +// This function ensure a directory exists, by recursively +// creating dirs for any parts of the path that don't exist yet. +void EnsureDirExists(const std::string &filepath); + +// Obtains the absolute path from any other path. +// Returns the input path if the absolute path couldn't be resolved. +std::string AbsolutePath(const std::string &filepath); + +// To and from UTF-8 unicode conversion functions + +// Convert a unicode code point into a UTF-8 representation by appending it +// to a string. Returns the number of bytes generated. +inline int ToUTF8(uint32_t ucc, std::string *out) { + FLATBUFFERS_ASSERT(!(ucc & 0x80000000)); // Top bit can't be set. + // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8 + for (int i = 0; i < 6; i++) { + // Max bits this encoding can represent. + uint32_t max_bits = 6 + i * 5 + static_cast(!i); + if (ucc < (1u << max_bits)) { // does it fit? 
+ // Remaining bits not encoded in the first byte, store 6 bits each + uint32_t remain_bits = i * 6; + // Store first byte: + (*out) += static_cast((0xFE << (max_bits - remain_bits)) | + (ucc >> remain_bits)); + // Store remaining bytes: + for (int j = i - 1; j >= 0; j--) { + (*out) += static_cast(((ucc >> (j * 6)) & 0x3F) | 0x80); + } + return i + 1; // Return the number of bytes added. + } + } + FLATBUFFERS_ASSERT(0); // Impossible to arrive here. + return -1; +} + +// Converts whatever prefix of the incoming string corresponds to a valid +// UTF-8 sequence into a unicode code. The incoming pointer will have been +// advanced past all bytes parsed. +// returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in +// this case). +inline int FromUTF8(const char **in) { + int len = 0; + // Count leading 1 bits. + for (int mask = 0x80; mask >= 0x04; mask >>= 1) { + if (**in & mask) { + len++; + } else { + break; + } + } + if ((static_cast(**in) << len) & 0x80) + return -1; // Bit after leading 1's must be 0. + if (!len) return *(*in)++; + // UTF-8 encoded values with a length are between 2 and 4 bytes. + if (len < 2 || len > 4) { return -1; } + // Grab initial bits of the code. + int ucc = *(*in)++ & ((1 << (7 - len)) - 1); + for (int i = 0; i < len - 1; i++) { + if ((**in & 0xC0) != 0x80) return -1; // Upper bits must 1 0. + ucc <<= 6; + ucc |= *(*in)++ & 0x3F; // Grab 6 more bits of the code. + } + // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for + // UTF-16 surrogate pairs). + if (ucc >= 0xD800 && ucc <= 0xDFFF) { return -1; } + // UTF-8 must represent code points in their shortest possible encoding. + switch (len) { + case 2: + // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF. + if (ucc < 0x0080 || ucc > 0x07FF) { return -1; } + break; + case 3: + // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF. + if (ucc < 0x0800 || ucc > 0xFFFF) { return -1; } + break; + case 4: + // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF. + if (ucc < 0x10000 || ucc > 0x10FFFF) { return -1; } + break; + } + return ucc; +} + +#ifndef FLATBUFFERS_PREFER_PRINTF +// Wraps a string to a maximum length, inserting new lines where necessary. Any +// existing whitespace will be collapsed down to a single space. A prefix or +// suffix can be provided, which will be inserted before or after a wrapped +// line, respectively. 
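// For example (illustrative):
//   WordWrap("aaa bbb ccc ddd", 8, "> ", "")
// returns
//   "aaa bbb\n> ccc\n> ddd"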
+
+#ifndef FLATBUFFERS_PREFER_PRINTF
+// Wraps a string to a maximum length, inserting new lines where necessary. Any
+// existing whitespace will be collapsed down to a single space. A prefix or
+// suffix can be provided, which will be inserted before or after a wrapped
+// line, respectively.
+inline std::string WordWrap(const std::string in, size_t max_length,
+                            const std::string wrapped_line_prefix,
+                            const std::string wrapped_line_suffix) {
+  std::istringstream in_stream(in);
+  std::string wrapped, line, word;
+
+  in_stream >> word;
+  line = word;
+
+  while (in_stream >> word) {
+    if ((line.length() + 1 + word.length() + wrapped_line_suffix.length()) <
+        max_length) {
+      line += " " + word;
+    } else {
+      wrapped += line + wrapped_line_suffix + "\n";
+      line = wrapped_line_prefix + word;
+    }
+  }
+  wrapped += line;
+
+  return wrapped;
+}
+#endif  // !FLATBUFFERS_PREFER_PRINTF
+
+inline bool EscapeString(const char *s, size_t length, std::string *_text,
+                         bool allow_non_utf8, bool natural_utf8) {
+  std::string &text = *_text;
+  text += "\"";
+  for (uoffset_t i = 0; i < length; i++) {
+    char c = s[i];
+    switch (c) {
+      case '\n': text += "\\n"; break;
+      case '\t': text += "\\t"; break;
+      case '\r': text += "\\r"; break;
+      case '\b': text += "\\b"; break;
+      case '\f': text += "\\f"; break;
+      case '\"': text += "\\\""; break;
+      case '\\': text += "\\\\"; break;
+      default:
+        if (c >= ' ' && c <= '~') {
+          text += c;
+        } else {
+          // Not printable ASCII data. Let's see if it's valid UTF-8 first:
+          const char *utf8 = s + i;
+          int ucc = FromUTF8(&utf8);
+          if (ucc < 0) {
+            if (allow_non_utf8) {
+              text += "\\x";
+              text += IntToStringHex(static_cast<uint8_t>(c), 2);
+            } else {
+              // There are two cases here:
+              //
+              // 1) We reached here by parsing an IDL file. In that case,
+              // we previously checked for non-UTF-8, so we shouldn't reach
+              // here.
+              //
+              // 2) We reached here by someone calling GenerateText()
+              // on a previously-serialized flatbuffer. The data might have
+              // non-UTF-8 Strings, or might be corrupt.
+              //
+              // In both cases, we have to give up and inform the caller
+              // they have no JSON.
+              return false;
+            }
+          } else {
+            if (natural_utf8) {
+              // utf8 points to past all utf-8 bytes parsed
+              text.append(s + i, static_cast<size_t>(utf8 - s - i));
+            } else if (ucc <= 0xFFFF) {
+              // Parses as Unicode within JSON's \uXXXX range, so use that.
+              text += "\\u";
+              text += IntToStringHex(ucc, 4);
+            } else if (ucc <= 0x10FFFF) {
+              // Encode Unicode SMP values to a surrogate pair using two \u
+              // escapes.
+              uint32_t base = ucc - 0x10000;
+              auto high_surrogate = (base >> 10) + 0xD800;
+              auto low_surrogate = (base & 0x03FF) + 0xDC00;
+              text += "\\u";
+              text += IntToStringHex(high_surrogate, 4);
+              text += "\\u";
+              text += IntToStringHex(low_surrogate, 4);
+            }
+            // Skip past characters recognized.
+            i = static_cast<uoffset_t>(utf8 - s - 1);
+          }
+        }
+        break;
+    }
+  }
+  text += "\"";
+  return true;
+}
+
+inline std::string BufferToHexText(const void *buffer, size_t buffer_size,
+                                   size_t max_length,
+                                   const std::string &wrapped_line_prefix,
+                                   const std::string &wrapped_line_suffix) {
+  std::string text = wrapped_line_prefix;
+  size_t start_offset = 0;
+  const char *s = reinterpret_cast<const char *>(buffer);
+  for (size_t i = 0; s && i < buffer_size; i++) {
+    // Last iteration or do we have more?
+    bool have_more = i + 1 < buffer_size;
+    text += "0x";
+    text += IntToStringHex(static_cast<uint8_t>(s[i]), 2);
+    if (have_more) { text += ','; }
+    // If we have more to process and we reached max_length
+    if (have_more &&
+        text.size() + wrapped_line_suffix.size() >= start_offset + max_length) {
+      text += wrapped_line_suffix;
+      text += '\n';
+      start_offset = text.size();
+      text += wrapped_line_prefix;
+    }
+  }
+  text += wrapped_line_suffix;
+  return text;
+}
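The surrogate-pair branch in EscapeString above is plain arithmetic; a worked example (illustration only, not part of the patch) for a code point outside the BMP, U+1F600, which would be emitted as \uD83D\uDE00:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint32_t ucc = 0x1F600;                             // code point above U+FFFF
  uint32_t base = ucc - 0x10000;                      // 0x0F600
  uint32_t high_surrogate = (base >> 10) + 0xD800;    // 0xD83D
  uint32_t low_surrogate = (base & 0x03FF) + 0xDC00;  // 0xDE00
  assert(high_surrogate == 0xD83D && low_surrogate == 0xDE00);
  return 0;
}
```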
+
+// Remove paired quotes in a string: "text"|'text' -> text.
+std::string RemoveStringQuotes(const std::string &s);
+
+// Change the global C-locale to locale with name <locale_name>.
+// Returns an actual locale name in <_value>, useful if locale_name is "" or
+// null.
+bool SetGlobalTestLocale(const char *locale_name,
+                         std::string *_value = nullptr);
+
+// Read (or test) a value of environment variable.
+bool ReadEnvironmentVariable(const char *var_name,
+                             std::string *_value = nullptr);
+
+// MSVC specific: Send all assert reports to STDOUT to prevent CI hangs.
+void SetupDefaultCRTReportMode();
+
+}  // namespace flatbuffers
+
+#endif  // FLATBUFFERS_UTIL_H_
diff --git a/code/main/main.cpp b/code/main/main.cpp
index 493ff17b..f3fbc556 100644
--- a/code/main/main.cpp
+++ b/code/main/main.cpp
@@ -24,8 +24,6 @@
 #include "time_sntp.h"
 #include "ClassControllCamera.h"
 #include "server_main.h"
-#include "server_camera.h"
-
 #define __SD_USE_ONE_LINE_MODE__
@@ -115,7 +113,6 @@ void task_NoSDBlink(void *pvParameter)
 {
 gpio_pad_select_gpio(BLINK_GPIO);
 gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT);
-
 TickType_t xDelay;
 xDelay = 100 / portTICK_PERIOD_MS;
@@ -127,33 +124,28 @@ void task_NoSDBlink(void *pvParameter)
 vTaskDelay( xDelay );
 gpio_set_level(BLINK_GPIO, 0);
 vTaskDelay( xDelay );
-
 }
 vTaskDelete(NULL); //Delete this task if it exits from the loop above
 }
 extern "C" void app_main(void)
 {
- printf("Do Reset Camera\n");
- PowerResetCamera();
- Camera.InitCam();
- Camera.LightOnOff(false);
-
 if (!Init_NVS_SDCard())
 {
 xTaskCreate(&task_NoSDBlink, "task_NoSDBlink", configMINIMAL_STACK_SIZE * 64, NULL, tskIDLE_PRIORITY+1, NULL);
 return;
 };
-
 CheckOTAUpdate();
+ Camera.InitCam();
+
 LoadWlanFromFile("/sdcard/wlan.ini");
 ConnectToWLAN();
 printf("\nNetparameter: IP: %s - GW: %s - NetMask %s\n", getIPAddress().c_str(), getGW().c_str(), getNetMask().c_str());
 TickType_t xDelay;
 xDelay = 2000 / portTICK_PERIOD_MS;
- printf("Autoflow: sleep for : %ldms\n", (long) xDelay);
+// printf("Autoflow: sleep for : %ldms\n", (long) xDelay);
 // LogFile.WriteToFile("Startsequence 06");
 vTaskDelay( xDelay );
 // LogFile.WriteToFile("Startsequence 07");
@@ -163,17 +155,14 @@ extern "C" void app_main(void)
 LogFile.WriteToFile("=============================================================================================");
 LogFile.SwitchOnOff(false);
- std::string zw = gettimestring("%Y%m%d-%H%M%S");
- printf("time %s\n", zw.c_str());
+// std::string zw = gettimestring("%Y%m%d-%H%M%S");
+// printf("time %s\n", zw.c_str());
-// Camera.InitCam();
-// Camera.LightOnOff(false);
 xDelay = 2000 / portTICK_PERIOD_MS;
 printf("Autoflow: sleep for : %ldms\n", (long) xDelay);
 vTaskDelay( xDelay );
 server = start_webserver();
- register_server_camera_uri(server);
 register_server_tflite_uri(server);
 register_server_file_uri(server, "/sdcard");
 register_server_ota_sdcard_uri(server);
@@ -185,4 +174,4 @@ extern "C" void app_main(void)
 register_server_main_uri(server, "/sdcard");
 TFliteDoAutoStart();
-}
+}
\ No newline at end of file
diff --git a/code/main/server_main.h b/code/main/server_main.h
index e4f75b4c..96088c86 100644
--- a/code/main/server_main.h
+++ b/code/main/server_main.h
@@ -9,7 +9,7 @@
 #include
 #include "nvs_flash.h"
 #include "tcpip_adapter.h"
-#include "esp_eth.h"
+//#include "esp_eth.h"
 #include
diff --git a/code/main/version.cpp b/code/main/version.cpp
index 59974913..1fb55414 100644
--- a/code/main/version.cpp
+++ b/code/main/version.cpp
@@ -1,4 +1,4 @@
-const char* GIT_REV="21a70c5";
+const char* GIT_REV="520f818";
 const char* GIT_TAG="";
-const char* GIT_BRANCH="master";
-const char*
BUILD_TIME="2021-04-05 10:14"; \ No newline at end of file +const char* GIT_BRANCH="rolling"; +const char* BUILD_TIME="2021-04-20 19:34"; \ No newline at end of file diff --git a/code/sdkconfig b/code/sdkconfig index 7028733c..fd0ae6a6 100644 --- a/code/sdkconfig +++ b/code/sdkconfig @@ -131,14 +131,15 @@ CONFIG_EXAMPLE_CONNECT_IPV6=y # # Compiler options # -CONFIG_COMPILER_OPTIMIZATION_DEFAULT=y -# CONFIG_COMPILER_OPTIMIZATION_SIZE is not set +# CONFIG_COMPILER_OPTIMIZATION_DEFAULT is not set +CONFIG_COMPILER_OPTIMIZATION_SIZE=y # CONFIG_COMPILER_OPTIMIZATION_PERF is not set # CONFIG_COMPILER_OPTIMIZATION_NONE is not set -CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE=y -# CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT is not set +# CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE is not set +CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y # CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE is not set -# CONFIG_COMPILER_CXX_EXCEPTIONS is not set +CONFIG_COMPILER_CXX_EXCEPTIONS=y +CONFIG_COMPILER_CXX_EXCEPTIONS_EMG_POOL_SIZE=0 # CONFIG_COMPILER_CXX_RTTI is not set CONFIG_COMPILER_STACK_CHECK_MODE_NONE=y # CONFIG_COMPILER_STACK_CHECK_MODE_NORM is not set @@ -246,9 +247,9 @@ CONFIG_ESP32_REV_MIN_0=y CONFIG_ESP32_REV_MIN=0 CONFIG_ESP32_DPORT_WORKAROUND=y # CONFIG_ESP32_DEFAULT_CPU_FREQ_80 is not set -CONFIG_ESP32_DEFAULT_CPU_FREQ_160=y -# CONFIG_ESP32_DEFAULT_CPU_FREQ_240 is not set -CONFIG_ESP32_DEFAULT_CPU_FREQ_MHZ=160 +# CONFIG_ESP32_DEFAULT_CPU_FREQ_160 is not set +CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y +CONFIG_ESP32_DEFAULT_CPU_FREQ_MHZ=240 CONFIG_ESP32_SPIRAM_SUPPORT=y # @@ -397,18 +398,8 @@ CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1=y # # Ethernet # -CONFIG_ETH_ENABLED=y -CONFIG_ETH_USE_ESP32_EMAC=y -CONFIG_ETH_PHY_INTERFACE_RMII=y -# CONFIG_ETH_PHY_INTERFACE_MII is not set -CONFIG_ETH_RMII_CLK_INPUT=y -# CONFIG_ETH_RMII_CLK_OUTPUT is not set -CONFIG_ETH_RMII_CLK_IN_GPIO=0 -CONFIG_ETH_DMA_BUFFER_SIZE=512 -CONFIG_ETH_DMA_RX_BUFFER_NUM=10 -CONFIG_ETH_DMA_TX_BUFFER_NUM=10 -CONFIG_ETH_USE_SPI_ETHERNET=y -CONFIG_ETH_SPI_ETHERNET_DM9051=y +# CONFIG_ETH_USE_ESP32_EMAC is not set +# CONFIG_ETH_USE_SPI_ETHERNET is not set # CONFIG_ETH_USE_OPENETH is not set # end of Ethernet @@ -428,7 +419,7 @@ CONFIG_ESP_EVENT_POST_FROM_IRAM_ISR=y # # ESP HTTP client # -CONFIG_ESP_HTTP_CLIENT_ENABLE_HTTPS=y +# CONFIG_ESP_HTTP_CLIENT_ENABLE_HTTPS is not set # CONFIG_ESP_HTTP_CLIENT_ENABLE_BASIC_AUTH is not set # end of ESP HTTP client @@ -547,14 +538,12 @@ CONFIG_FATFS_ALLOC_PREFER_EXTRAM=y # Modbus configuration # CONFIG_FMB_COMM_MODE_RTU_EN=y -CONFIG_FMB_COMM_MODE_ASCII_EN=y +# CONFIG_FMB_COMM_MODE_ASCII_EN is not set CONFIG_FMB_MASTER_TIMEOUT_MS_RESPOND=150 CONFIG_FMB_MASTER_DELAY_MS_CONVERT=200 CONFIG_FMB_QUEUE_LENGTH=20 CONFIG_FMB_SERIAL_TASK_STACK_SIZE=2048 CONFIG_FMB_SERIAL_BUF_SIZE=256 -CONFIG_FMB_SERIAL_ASCII_BITS_PER_SYMB=8 -CONFIG_FMB_SERIAL_ASCII_TIMEOUT_RESPOND_MS=1000 CONFIG_FMB_SERIAL_TASK_PRIO=10 # CONFIG_FMB_CONTROLLER_SLAVE_ID_SUPPORT is not set CONFIG_FMB_CONTROLLER_NOTIFY_TIMEOUT=20 @@ -598,7 +587,6 @@ CONFIG_FREERTOS_QUEUE_REGISTRY_SIZE=0 # CONFIG_FREERTOS_USE_TRACE_FACILITY is not set # CONFIG_FREERTOS_GENERATE_RUN_TIME_STATS is not set # CONFIG_FREERTOS_DEBUG_INTERNALS is not set -CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER=y CONFIG_FREERTOS_CHECK_MUTEX_GIVEN_BY_OWNER=y # CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE is not set CONFIG_FREERTOS_DEBUG_OCDAWARE=y @@ -994,24 +982,24 @@ CONFIG_WIFI_PROV_AUTOSTOP_TIMEOUT=30 # # Supplicant # -CONFIG_WPA_MBEDTLS_CRYPTO=y +# CONFIG_WPA_MBEDTLS_CRYPTO is not 
set # CONFIG_WPA_TLS_V12 is not set # end of Supplicant # # Camera configuration # -CONFIG_OV7670_SUPPORT=y +# CONFIG_OV7670_SUPPORT is not set # CONFIG_OV7725_SUPPORT is not set -CONFIG_NT99141_SUPPORT=y +# CONFIG_NT99141_SUPPORT is not set CONFIG_OV2640_SUPPORT=y -CONFIG_OV3660_SUPPORT=y -CONFIG_OV5640_SUPPORT=y +# CONFIG_OV3660_SUPPORT is not set +# CONFIG_OV5640_SUPPORT is not set # CONFIG_SCCB_HARDWARE_I2C_PORT0 is not set CONFIG_SCCB_HARDWARE_I2C_PORT1=y -CONFIG_CAMERA_CORE0=y +# CONFIG_CAMERA_CORE0 is not set # CONFIG_CAMERA_CORE1 is not set -# CONFIG_CAMERA_NO_AFFINITY is not set +CONFIG_CAMERA_NO_AFFINITY=y # end of Camera configuration # end of Component config @@ -1045,12 +1033,13 @@ CONFIG_MONITOR_BAUD_115200B=y # CONFIG_MONITOR_BAUD_OTHER is not set CONFIG_MONITOR_BAUD_OTHER_VAL=115200 CONFIG_MONITOR_BAUD=115200 -CONFIG_COMPILER_OPTIMIZATION_LEVEL_DEBUG=y -# CONFIG_COMPILER_OPTIMIZATION_LEVEL_RELEASE is not set -CONFIG_OPTIMIZATION_ASSERTIONS_ENABLED=y -# CONFIG_OPTIMIZATION_ASSERTIONS_SILENT is not set +# CONFIG_COMPILER_OPTIMIZATION_LEVEL_DEBUG is not set +CONFIG_COMPILER_OPTIMIZATION_LEVEL_RELEASE=y +# CONFIG_OPTIMIZATION_ASSERTIONS_ENABLED is not set +CONFIG_OPTIMIZATION_ASSERTIONS_SILENT=y # CONFIG_OPTIMIZATION_ASSERTIONS_DISABLED is not set -# CONFIG_CXX_EXCEPTIONS is not set +CONFIG_CXX_EXCEPTIONS=y +CONFIG_CXX_EXCEPTIONS_EMG_POOL_SIZE=0 CONFIG_STACK_CHECK_NONE=y # CONFIG_STACK_CHECK_NORM is not set # CONFIG_STACK_CHECK_STRONG is not set diff --git a/code/sdkconfig.old b/code/sdkconfig.old index f8a0f2c5..a2e15946 100644 --- a/code/sdkconfig.old +++ b/code/sdkconfig.old @@ -131,14 +131,15 @@ CONFIG_EXAMPLE_CONNECT_IPV6=y # # Compiler options # -CONFIG_COMPILER_OPTIMIZATION_DEFAULT=y -# CONFIG_COMPILER_OPTIMIZATION_SIZE is not set +# CONFIG_COMPILER_OPTIMIZATION_DEFAULT is not set +CONFIG_COMPILER_OPTIMIZATION_SIZE=y # CONFIG_COMPILER_OPTIMIZATION_PERF is not set # CONFIG_COMPILER_OPTIMIZATION_NONE is not set -CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE=y -# CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT is not set +# CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE is not set +CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y # CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE is not set -# CONFIG_COMPILER_CXX_EXCEPTIONS is not set +CONFIG_COMPILER_CXX_EXCEPTIONS=y +CONFIG_COMPILER_CXX_EXCEPTIONS_EMG_POOL_SIZE=0 # CONFIG_COMPILER_CXX_RTTI is not set CONFIG_COMPILER_STACK_CHECK_MODE_NONE=y # CONFIG_COMPILER_STACK_CHECK_MODE_NORM is not set @@ -315,8 +316,8 @@ CONFIG_ESP32_UNIVERSAL_MAC_ADDRESSES_FOUR=y CONFIG_ESP32_UNIVERSAL_MAC_ADDRESSES=4 # CONFIG_ESP32_ULP_COPROC_ENABLED is not set CONFIG_ESP32_ULP_COPROC_RESERVE_MEM=0 -CONFIG_ESP32_PANIC_PRINT_HALT=y -# CONFIG_ESP32_PANIC_PRINT_REBOOT is not set +# CONFIG_ESP32_PANIC_PRINT_HALT is not set +CONFIG_ESP32_PANIC_PRINT_REBOOT=y # CONFIG_ESP32_PANIC_SILENT_REBOOT is not set # CONFIG_ESP32_PANIC_GDBSTUB is not set CONFIG_ESP32_DEBUG_OCDAWARE=y @@ -397,18 +398,8 @@ CONFIG_ESP_TASK_WDT_CHECK_IDLE_TASK_CPU1=y # # Ethernet # -CONFIG_ETH_ENABLED=y -CONFIG_ETH_USE_ESP32_EMAC=y -CONFIG_ETH_PHY_INTERFACE_RMII=y -# CONFIG_ETH_PHY_INTERFACE_MII is not set -CONFIG_ETH_RMII_CLK_INPUT=y -# CONFIG_ETH_RMII_CLK_OUTPUT is not set -CONFIG_ETH_RMII_CLK_IN_GPIO=0 -CONFIG_ETH_DMA_BUFFER_SIZE=512 -CONFIG_ETH_DMA_RX_BUFFER_NUM=10 -CONFIG_ETH_DMA_TX_BUFFER_NUM=10 -CONFIG_ETH_USE_SPI_ETHERNET=y -CONFIG_ETH_SPI_ETHERNET_DM9051=y +# CONFIG_ETH_USE_ESP32_EMAC is not set +# CONFIG_ETH_USE_SPI_ETHERNET is not set # CONFIG_ETH_USE_OPENETH is not set # end 
of Ethernet @@ -428,7 +419,7 @@ CONFIG_ESP_EVENT_POST_FROM_IRAM_ISR=y # # ESP HTTP client # -CONFIG_ESP_HTTP_CLIENT_ENABLE_HTTPS=y +# CONFIG_ESP_HTTP_CLIENT_ENABLE_HTTPS is not set # CONFIG_ESP_HTTP_CLIENT_ENABLE_BASIC_AUTH is not set # end of ESP HTTP client @@ -547,14 +538,12 @@ CONFIG_FATFS_ALLOC_PREFER_EXTRAM=y # Modbus configuration # CONFIG_FMB_COMM_MODE_RTU_EN=y -CONFIG_FMB_COMM_MODE_ASCII_EN=y +# CONFIG_FMB_COMM_MODE_ASCII_EN is not set CONFIG_FMB_MASTER_TIMEOUT_MS_RESPOND=150 CONFIG_FMB_MASTER_DELAY_MS_CONVERT=200 CONFIG_FMB_QUEUE_LENGTH=20 CONFIG_FMB_SERIAL_TASK_STACK_SIZE=2048 CONFIG_FMB_SERIAL_BUF_SIZE=256 -CONFIG_FMB_SERIAL_ASCII_BITS_PER_SYMB=8 -CONFIG_FMB_SERIAL_ASCII_TIMEOUT_RESPOND_MS=1000 CONFIG_FMB_SERIAL_TASK_PRIO=10 # CONFIG_FMB_CONTROLLER_SLAVE_ID_SUPPORT is not set CONFIG_FMB_CONTROLLER_NOTIFY_TIMEOUT=20 @@ -598,7 +587,6 @@ CONFIG_FREERTOS_QUEUE_REGISTRY_SIZE=0 # CONFIG_FREERTOS_USE_TRACE_FACILITY is not set # CONFIG_FREERTOS_GENERATE_RUN_TIME_STATS is not set # CONFIG_FREERTOS_DEBUG_INTERNALS is not set -CONFIG_FREERTOS_TASK_FUNCTION_WRAPPER=y CONFIG_FREERTOS_CHECK_MUTEX_GIVEN_BY_OWNER=y # CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE is not set CONFIG_FREERTOS_DEBUG_OCDAWARE=y @@ -994,24 +982,24 @@ CONFIG_WIFI_PROV_AUTOSTOP_TIMEOUT=30 # # Supplicant # -CONFIG_WPA_MBEDTLS_CRYPTO=y +# CONFIG_WPA_MBEDTLS_CRYPTO is not set # CONFIG_WPA_TLS_V12 is not set # end of Supplicant # # Camera configuration # -CONFIG_OV7670_SUPPORT=y +# CONFIG_OV7670_SUPPORT is not set # CONFIG_OV7725_SUPPORT is not set -CONFIG_NT99141_SUPPORT=y +# CONFIG_NT99141_SUPPORT is not set CONFIG_OV2640_SUPPORT=y -CONFIG_OV3660_SUPPORT=y -CONFIG_OV5640_SUPPORT=y +# CONFIG_OV3660_SUPPORT is not set +# CONFIG_OV5640_SUPPORT is not set # CONFIG_SCCB_HARDWARE_I2C_PORT0 is not set CONFIG_SCCB_HARDWARE_I2C_PORT1=y -CONFIG_CAMERA_CORE0=y +# CONFIG_CAMERA_CORE0 is not set # CONFIG_CAMERA_CORE1 is not set -# CONFIG_CAMERA_NO_AFFINITY is not set +CONFIG_CAMERA_NO_AFFINITY=y # end of Camera configuration # end of Component config @@ -1020,152 +1008,3 @@ CONFIG_CAMERA_CORE0=y # # CONFIG_LEGACY_INCLUDE_COMMON_HEADERS is not set # end of Compatibility options - -# Deprecated options for backward compatibility -CONFIG_TOOLPREFIX="xtensa-esp32-elf-" -# CONFIG_LOG_BOOTLOADER_LEVEL_NONE is not set -# CONFIG_LOG_BOOTLOADER_LEVEL_ERROR is not set -# CONFIG_LOG_BOOTLOADER_LEVEL_WARN is not set -CONFIG_LOG_BOOTLOADER_LEVEL_INFO=y -# CONFIG_LOG_BOOTLOADER_LEVEL_DEBUG is not set -# CONFIG_LOG_BOOTLOADER_LEVEL_VERBOSE is not set -CONFIG_LOG_BOOTLOADER_LEVEL=3 -# CONFIG_APP_ROLLBACK_ENABLE is not set -# CONFIG_FLASH_ENCRYPTION_ENABLED is not set -# CONFIG_FLASHMODE_QIO is not set -# CONFIG_FLASHMODE_QOUT is not set -CONFIG_FLASHMODE_DIO=y -# CONFIG_FLASHMODE_DOUT is not set -# CONFIG_MONITOR_BAUD_9600B is not set -# CONFIG_MONITOR_BAUD_57600B is not set -CONFIG_MONITOR_BAUD_115200B=y -# CONFIG_MONITOR_BAUD_230400B is not set -# CONFIG_MONITOR_BAUD_921600B is not set -# CONFIG_MONITOR_BAUD_2MB is not set -# CONFIG_MONITOR_BAUD_OTHER is not set -CONFIG_MONITOR_BAUD_OTHER_VAL=115200 -CONFIG_MONITOR_BAUD=115200 -CONFIG_COMPILER_OPTIMIZATION_LEVEL_DEBUG=y -# CONFIG_COMPILER_OPTIMIZATION_LEVEL_RELEASE is not set -CONFIG_OPTIMIZATION_ASSERTIONS_ENABLED=y -# CONFIG_OPTIMIZATION_ASSERTIONS_SILENT is not set -# CONFIG_OPTIMIZATION_ASSERTIONS_DISABLED is not set -# CONFIG_CXX_EXCEPTIONS is not set -CONFIG_STACK_CHECK_NONE=y -# CONFIG_STACK_CHECK_NORM is not set -# CONFIG_STACK_CHECK_STRONG is not set -# CONFIG_STACK_CHECK_ALL is not set 
-# CONFIG_WARN_WRITE_STRINGS is not set -# CONFIG_DISABLE_GCC8_WARNINGS is not set -# CONFIG_ESP32_APPTRACE_DEST_TRAX is not set -CONFIG_ESP32_APPTRACE_DEST_NONE=y -CONFIG_ESP32_APPTRACE_LOCK_ENABLE=y -CONFIG_BTDM_CONTROLLER_BLE_MAX_CONN_EFF=0 -CONFIG_BTDM_CONTROLLER_BR_EDR_MAX_ACL_CONN_EFF=0 -CONFIG_BTDM_CONTROLLER_BR_EDR_MAX_SYNC_CONN_EFF=0 -CONFIG_BTDM_CONTROLLER_PINNED_TO_CORE=0 -CONFIG_ADC2_DISABLE_DAC=y -CONFIG_SPIRAM_SUPPORT=y -# CONFIG_WIFI_LWIP_ALLOCATION_FROM_SPIRAM_FIRST is not set -CONFIG_TRACEMEM_RESERVE_DRAM=0x0 -# CONFIG_TWO_UNIVERSAL_MAC_ADDRESS is not set -CONFIG_FOUR_UNIVERSAL_MAC_ADDRESS=y -CONFIG_NUMBER_OF_UNIVERSAL_MAC_ADDRESS=4 -# CONFIG_ULP_COPROC_ENABLED is not set -CONFIG_ULP_COPROC_RESERVE_MEM=0 -CONFIG_BROWNOUT_DET=y -CONFIG_BROWNOUT_DET_LVL_SEL_0=y -# CONFIG_BROWNOUT_DET_LVL_SEL_1 is not set -# CONFIG_BROWNOUT_DET_LVL_SEL_2 is not set -# CONFIG_BROWNOUT_DET_LVL_SEL_3 is not set -# CONFIG_BROWNOUT_DET_LVL_SEL_4 is not set -# CONFIG_BROWNOUT_DET_LVL_SEL_5 is not set -# CONFIG_BROWNOUT_DET_LVL_SEL_6 is not set -# CONFIG_BROWNOUT_DET_LVL_SEL_7 is not set -CONFIG_BROWNOUT_DET_LVL=0 -CONFIG_REDUCE_PHY_TX_POWER=y -CONFIG_ESP32_RTC_CLOCK_SOURCE_INTERNAL_RC=y -# CONFIG_ESP32_RTC_CLOCK_SOURCE_EXTERNAL_CRYSTAL is not set -# CONFIG_ESP32_RTC_CLOCK_SOURCE_EXTERNAL_OSC is not set -# CONFIG_ESP32_RTC_CLOCK_SOURCE_INTERNAL_8MD256 is not set -# CONFIG_DISABLE_BASIC_ROM_CONSOLE is not set -# CONFIG_NO_BLOBS is not set -# CONFIG_COMPATIBLE_PRE_V2_1_BOOTLOADERS is not set -CONFIG_SYSTEM_EVENT_QUEUE_SIZE=32 -CONFIG_SYSTEM_EVENT_TASK_STACK_SIZE=2304 -CONFIG_MAIN_TASK_STACK_SIZE=3584 -CONFIG_IPC_TASK_STACK_SIZE=1024 -CONFIG_TIMER_TASK_STACK_SIZE=3584 -CONFIG_CONSOLE_UART_DEFAULT=y -# CONFIG_CONSOLE_UART_CUSTOM is not set -# CONFIG_CONSOLE_UART_NONE is not set -CONFIG_CONSOLE_UART_NUM=0 -CONFIG_CONSOLE_UART_BAUDRATE=115200 -CONFIG_INT_WDT=y -CONFIG_INT_WDT_TIMEOUT_MS=300 -CONFIG_INT_WDT_CHECK_CPU1=y -CONFIG_TASK_WDT=y -# CONFIG_TASK_WDT_PANIC is not set -CONFIG_TASK_WDT_TIMEOUT_S=3 -CONFIG_TASK_WDT_CHECK_IDLE_TASK_CPU0=y -CONFIG_TASK_WDT_CHECK_IDLE_TASK_CPU1=y -# CONFIG_EVENT_LOOP_PROFILING is not set -CONFIG_POST_EVENTS_FROM_ISR=y -CONFIG_POST_EVENTS_FROM_IRAM_ISR=y -CONFIG_MB_MASTER_TIMEOUT_MS_RESPOND=150 -CONFIG_MB_MASTER_DELAY_MS_CONVERT=200 -CONFIG_MB_QUEUE_LENGTH=20 -CONFIG_MB_SERIAL_TASK_STACK_SIZE=2048 -CONFIG_MB_SERIAL_BUF_SIZE=256 -CONFIG_MB_SERIAL_TASK_PRIO=10 -# CONFIG_MB_CONTROLLER_SLAVE_ID_SUPPORT is not set -CONFIG_MB_CONTROLLER_NOTIFY_TIMEOUT=20 -CONFIG_MB_CONTROLLER_NOTIFY_QUEUE_SIZE=20 -CONFIG_MB_CONTROLLER_STACK_SIZE=4096 -CONFIG_MB_EVENT_QUEUE_TIMEOUT=20 -CONFIG_MB_TIMER_PORT_ENABLED=y -CONFIG_MB_TIMER_GROUP=0 -CONFIG_MB_TIMER_INDEX=0 -CONFIG_SUPPORT_STATIC_ALLOCATION=y -# CONFIG_ENABLE_STATIC_TASK_CLEAN_UP_HOOK is not set -CONFIG_TIMER_TASK_PRIORITY=1 -CONFIG_TIMER_TASK_STACK_DEPTH=2048 -CONFIG_TIMER_QUEUE_LENGTH=10 -# CONFIG_L2_TO_L3_COPY is not set -# CONFIG_USE_ONLY_LWIP_SELECT is not set -CONFIG_ESP_GRATUITOUS_ARP=y -CONFIG_GARP_TMR_INTERVAL=60 -CONFIG_TCPIP_RECVMBOX_SIZE=32 -CONFIG_TCP_MAXRTX=12 -CONFIG_TCP_SYNMAXRTX=6 -CONFIG_TCP_MSS=1440 -CONFIG_TCP_MSL=60000 -CONFIG_TCP_SND_BUF_DEFAULT=5744 -CONFIG_TCP_WND_DEFAULT=5744 -CONFIG_TCP_RECVMBOX_SIZE=6 -CONFIG_TCP_QUEUE_OOSEQ=y -# CONFIG_ESP_TCP_KEEP_CONNECTION_WHEN_IP_CHANGES is not set -CONFIG_TCP_OVERSIZE_MSS=y -# CONFIG_TCP_OVERSIZE_QUARTER_MSS is not set -# CONFIG_TCP_OVERSIZE_DISABLE is not set -CONFIG_UDP_RECVMBOX_SIZE=6 -CONFIG_TCPIP_TASK_STACK_SIZE=3072 -CONFIG_TCPIP_TASK_AFFINITY_NO_AFFINITY=y -# 
CONFIG_TCPIP_TASK_AFFINITY_CPU0 is not set -# CONFIG_TCPIP_TASK_AFFINITY_CPU1 is not set -CONFIG_TCPIP_TASK_AFFINITY=0x7FFFFFFF -# CONFIG_PPP_SUPPORT is not set -CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT=5 -CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT=3072 -CONFIG_ESP32_PTHREAD_STACK_MIN=768 -CONFIG_ESP32_DEFAULT_PTHREAD_CORE_NO_AFFINITY=y -# CONFIG_ESP32_DEFAULT_PTHREAD_CORE_0 is not set -# CONFIG_ESP32_DEFAULT_PTHREAD_CORE_1 is not set -CONFIG_ESP32_PTHREAD_TASK_CORE_DEFAULT=-1 -CONFIG_ESP32_PTHREAD_TASK_NAME_DEFAULT="pthread" -CONFIG_SPI_FLASH_WRITING_DANGEROUS_REGIONS_ABORTS=y -# CONFIG_SPI_FLASH_WRITING_DANGEROUS_REGIONS_FAILS is not set -# CONFIG_SPI_FLASH_WRITING_DANGEROUS_REGIONS_ALLOWED is not set -# CONFIG_SUPPRESS_SELECT_DEBUG_OUTPUT is not set -CONFIG_SUPPORT_TERMIOS=y -# End of deprecated options diff --git a/code/version.cpp b/code/version.cpp index 59974913..1fb55414 100644 --- a/code/version.cpp +++ b/code/version.cpp @@ -1,4 +1,4 @@ -const char* GIT_REV="21a70c5"; +const char* GIT_REV="520f818"; const char* GIT_TAG=""; -const char* GIT_BRANCH="master"; -const char* BUILD_TIME="2021-04-05 10:14"; \ No newline at end of file +const char* GIT_BRANCH="rolling"; +const char* BUILD_TIME="2021-04-20 19:34"; \ No newline at end of file diff --git a/firmware/bootloader.bin b/firmware/bootloader.bin index bede2cb0..986c3daf 100644 Binary files a/firmware/bootloader.bin and b/firmware/bootloader.bin differ diff --git a/firmware/firmware.bin b/firmware/firmware.bin index c100b1e0..43deab05 100644 Binary files a/firmware/firmware.bin and b/firmware/firmware.bin differ diff --git a/firmware/html.zip b/firmware/html.zip index 33a00339..2fb02aa9 100644 Binary files a/firmware/html.zip and b/firmware/html.zip differ diff --git a/sd-card/config/ana0630s2.tflite b/sd-card/config/ana0630s2.tflite deleted file mode 100644 index 6cba3cf9..00000000 Binary files a/sd-card/config/ana0630s2.tflite and /dev/null differ diff --git a/sd-card/config/config.ini b/sd-card/config/config.ini index 5546a843..08fa260b 100644 --- a/sd-card/config/config.ini +++ b/sd-card/config/config.ini @@ -17,7 +17,7 @@ InitialMirror= false AlignmentAlgo = Default [Digits] -Model = /config/dig0820s2q.tflite +Model = /config/dig0830s2q.tflite ;LogImageLocation = /log/digit ;LogfileRetentionInDays = 3 ModelInputSize = 20 32 diff --git a/sd-card/config/dig0811s1.tflite b/sd-card/config/dig0811s1.tflite deleted file mode 100644 index 8a78aed4..00000000 Binary files a/sd-card/config/dig0811s1.tflite and /dev/null differ diff --git a/sd-card/config/dig0820s2q.tflite b/sd-card/config/dig0820s2q.tflite deleted file mode 100644 index 1ad67dd0..00000000 Binary files a/sd-card/config/dig0820s2q.tflite and /dev/null differ diff --git a/sd-card/config/dig0830s2q.tflite b/sd-card/config/dig0830s2q.tflite new file mode 100644 index 00000000..4ac84793 Binary files /dev/null and b/sd-card/config/dig0830s2q.tflite differ diff --git a/sd-card/html/index_configure.html b/sd-card/html/index_configure.html index f55e9a05..70187bef 100644 --- a/sd-card/html/index_configure.html +++ b/sd-card/html/index_configure.html @@ -91,7 +91,6 @@ li.dropdown { Analog ROIs -
  • Check