Mirror of https://github.com/jomjol/AI-on-the-edge-device.git (synced 2025-12-10 21:46:55 +03:00)

Compare commits (24 commits)
| SHA1 |
|---|
| 2daf6c8b3f |
| 24b46158de |
| 63d336ba28 |
| 8dd3a92671 |
| 8e8897c70d |
| eac513411e |
| 98f9274085 |
| 884dd9fc3b |
| 04c564936a |
| 957138960a |
| 58a0297915 |
| 61bf536207 |
| 4136a7b372 |
| b3afc9961d |
| 27e2e042da |
| cc659bad30 |
| 776931c0ad |
| e22b4b6fe1 |
| cad534bffe |
| 3b44adb6fa |
| cc0bd1473f |
| 58124d27bf |
| 9ad118814a |
| 6d298c1746 |
Changelog.md (63 changed lines)
@@ -2,7 +2,70 @@

##### 8.5.0 - Multi Meter Support (2021-10-07)

* Upgrade digital CNN to v13.1.0 (added new images)
* Bug fix: WLAN password with space, double digit output

##### 8.4.0 - Multi Meter Support (2021-09-25)

* License change (removed MIT license, see remark below)
* HTML: show hostname in title and main page
* Configuration:
  * Moved setting `ExtendedResolution` to the individual number settings
  * New parameter `IgnoreLeadingNaN` (deletes leading NaNs specifically)
  * **ATTENTION**: an update of the `config.ini` is needed (open, adjust `ExtendedResolution`, save)
* Bug fixes (HTML, images of recognized numbers)

### **ATTENTION: LICENSE CHANGE - removal of MIT License.**

- Currently no license is published - copyright belongs to the author
- If you are interested in commercial usage or dedicated versions, please contact the developer
- No limits on private usage

##### 8.3.0 - Multi Meter Support (2021-09-12)

* Upgrade digital CNN to v12.1.0 (added new images)
* Dedicated NaN handling, internal refactoring (CNN handling)
* HTML: confirmation after config.ini update
* Bug fixes

##### 8.2.0 - Multi Meter Support (2021-08-24)

* Improved server responsiveness
* Flow status and prevalue status in overview
* Improved prevalue handling

##### 8.1.0 - Multi Meter Support (2021-08-12)

* GPIO: use the general MQTT main topic for GPIO
* Upgrade digital CNN to v12.0.0 (added new images)
* Update tfmicro to new master (2021-08-07)
* Bug fix: remove text in MQTT value, remove connect limit in WLAN reconnect

##### 8.0.5 - Multi Meter Support (2021-08-01)

* NEW 8.0.5: bug fix: saving prevalue
* NEW 8.0.4: bug fix: load config.ini after upgrade
* NEW 8.0.3: bug fix: reboot during `config.ini` handling, HTML error
* NEW 8.0.2: saving rounded prevalue, bug fix HTML server
* NEW 8.0.1: bug fix: HTML handling of the parameters `FixedExposure` and `ImageSize`
* Dual / multi meter support (more than one number to be recognized).
  This is implemented with the feature "number" on the ROI definition as well as selected options
* MQTT: standardization of the naming - including new topics (`json`, `freeMem`, `uptime`)
* Preparation for extended GPIO support (thanks to Zwerk2k) - not yet tested or fully functional
* Bug fixes: HTML server, memory leak, MQTT connect, hostname, turning off the flash LED

<span style="color: red;">**ATTENTION: the configuration and prevalue files are modified automatically and will not be backward compatible!**</span>

##### 7.1.2 MQTT-Update - (2021-06-17)
@@ -11,6 +11,59 @@

____

#### #17 Direct InfluxDB connection

* https://github.com/jomjol/AI-on-the-edge-device/issues/534
* Direct interface to an InfluxDB database
* Integrate the InfluxDB interface into the firmware
* Adapt the HTML web page for configuration (a rough sketch of such a write follows below)
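A minimal sketch of what such a direct connection could look like, assuming InfluxDB 1.x line protocol posted over HTTP with ESP-IDF's `esp_http_client`; the endpoint URL, database and measurement names are placeholders, not the project's final design:

```cpp
#include <string>
#include "esp_http_client.h"

// Hypothetical helper: push one reading to InfluxDB 1.x using the line protocol.
// URL, database ("watermeter") and measurement name are assumptions for illustration.
static void influxdb_write_value(const std::string &measurement, double value)
{
    std::string payload = measurement + " value=" + std::to_string(value);

    esp_http_client_config_t cfg = {};
    cfg.url = "http://192.168.1.10:8086/write?db=watermeter";

    esp_http_client_handle_t client = esp_http_client_init(&cfg);
    esp_http_client_set_method(client, HTTP_METHOD_POST);
    esp_http_client_set_post_field(client, payload.c_str(), payload.length());
    esp_http_client_perform(client);   // error handling omitted in this sketch
    esp_http_client_cleanup(client);
}
```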
#### #16 Serial Communication

* https://github.com/jomjol/AI-on-the-edge-device/issues/512
* Send the readout value via the RX/TX interface with a dedicated TAG (see the sketch below)
* Create a dedicated communication FlowModule
* Modification of the RX/TX communication
* Configuration interface
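A rough sketch of such a tagged serial output, assuming a simple line-based "TAG;name;value" framing over the ESP-IDF UART driver; port, framing and tag are placeholders until the FlowModule is specified:

```cpp
#include <string>
#include "driver/uart.h"

// Hypothetical helper: write one readout with a dedicated tag to the RX/TX interface.
// Assumes uart_driver_install() has already been called for UART_NUM_0.
static void send_readout_serial(const std::string &name, const std::string &value)
{
    std::string line = "#READOUT;" + name + ";" + value + "\n";   // assumed framing
    uart_write_bytes(UART_NUM_0, line.c_str(), line.length());
}
```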
#### #15 Calibration for FishEye image

* https://github.com/jomjol/AI-on-the-edge-device/issues/507

1. Development of such a correction algorithm with the libraries that are available for the ESP32 environment (see the sketch below)
2. New module to integrate the correction into the image processing flow
3. Extension of the configuration (config.ini) and HTML pages
4. Parameter adjustment and testing for every different fish-eye module
5. Maintenance for further updates / modules, ...
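The core of step 1 is the usual radial (barrel) distortion model; a minimal per-pixel sketch, with the coefficients k1/k2 coming from the per-lens calibration of step 4 (values are not specified here):

```cpp
#include <cmath>

// Hypothetical radial undistortion of one pixel around the optical center (cx, cy).
static void undistort_point(float xd, float yd, float cx, float cy,
                            float k1, float k2, float &xu, float &yu)
{
    float dx = xd - cx, dy = yd - cy;
    float r2 = dx * dx + dy * dy;                   // squared radius
    float scale = 1.0f + k1 * r2 + k2 * r2 * r2;    // radial correction factor
    xu = cx + dx * scale;
    yu = cy + dy * scale;
}
```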
#### #14 Backup and restore option for configuration

* https://github.com/jomjol/AI-on-the-edge-device/issues/459
* Implement zip file compression for store and restore
* Update the HTML to handle it

#### #13 Manage non-linear gauge without CNN re-training

* https://github.com/jomjol/AI-on-the-edge-device/issues/443
* Implement a look-up table for non-linear analog meters (see the sketch below)
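A minimal sketch of the look-up table idea: calibration points map the raw pointer reading to the corrected value, with linear interpolation in between (the table itself is hypothetical):

```cpp
#include <vector>
#include <utility>

// lut: pairs of (raw reading, corrected value), sorted by raw reading.
static float CorrectNonLinear(float raw, const std::vector<std::pair<float, float>> &lut)
{
    if (raw <= lut.front().first) return lut.front().second;
    for (size_t i = 1; i < lut.size(); ++i) {
        if (raw <= lut[i].first) {
            float t = (raw - lut[i - 1].first) / (lut[i].first - lut[i - 1].first);
            return lut[i - 1].second + t * (lut[i].second - lut[i - 1].second);
        }
    }
    return lut.back().second;
}
```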
#### #12 Fewer reboots due to memory leakage

* Issues: #414, #425, #430

#### #11 MQTT - configurable payload

* https://github.com/jomjol/AI-on-the-edge-device/issues/344
README.md (121 changed lines)
@@ -34,21 +34,67 @@ If you have any technical topics, you can file an issue in this repository.

In other cases you can contact the developer via email: <img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/mail.jpg" height="25">

## Change log

------

## Coming next

* Automated update of the neural network file (tflite) to make the learning of additional pictures much easier and automated (GitHub action)
* New "hybrid" neural network for digital numbers --> allowing the detection of intermediate states ("ring between two numbers") as a subdigit

------

## Change log

### Known Issues

* Slow response of the web server during picture analysis
* Spontaneous reboots (mostly due to HTML access during image processing) - self recovery implemented

------

**General remark:** Besides the `firmware.bin`, typically the content of `/html` also needs to be updated!

------

##### 10.1.0 - Stability Increase (2022-01-09)

- Reduce ESP32 frequency to 160 MHz
- Update tflite (new source: https://github.com/espressif/tflite-micro-esp-examples)
- Update analog neural network (ana-s3-q-20220105.tflite)
- Update digital neural network (dig-s1-q-20220102.tflite)
- Increased web-server buffers
- Bug fix: compiler compatibility

##### 10.0.2 - Stability Increase (2022-01-01)

- NEW v10.0.2: Corrected JSON error
- Updated compiler toolchain to ESP-IDF 4.3
- Removal of a memory leak
- Improved error handling during startup (check PSRAM and camera, with a remark in the logfile)
- MQTT: additionally implemented the raw value; removal of the regex constraint
- Normalized parameter ``MaxRateValue`` to "change per minute" (see the sketch below)
- HTML: improved input handling
- Corrected error handling: in case of an error, the old value, rate and timestamp are no longer transmitted
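The normalization of ``MaxRateValue`` means the allowed change is now checked per minute, mirroring the ClassFlowPostProcessing hunk further down in this diff; a condensed sketch of that check:

```cpp
#include <cmath>
#include <ctime>

// Sketch only: the time difference is converted to minutes before the rate
// is compared against MaxRateValue ("change per minute").
static bool RateTooHigh(double value, double preValue,
                        time_t now, time_t lastRead, double maxRatePerMinute)
{
    double minutes = difftime(now, lastRead) / 60.0;      // seconds -> minutes
    double ratePerMinute = (value - preValue) / minutes;  // change per minute
    return std::fabs(ratePerMinute) > maxRatePerMinute;
}
```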
##### 9.2.0 - External Illumination (2021-12-02)

- Direct JSON access: ``http://IP-ADDRESS/json`` (example response below)
- Error message in the log file in case of a camera error during startup
- Upgrade analog CNN to v9.1.0
- Upgrade digital CNN to v13.3.0 (added new images)
- HTML: support of different ports
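The response of ``http://IP-ADDRESS/json`` follows the getJSON() implementation shown later in this diff: one object per configured number with value, raw, error, rate and timestamp fields. The number name and the values below are made up for illustration:

```json
{
  "main": {
    "value": 528.792,
    "raw": "00528.7920",
    "error": "no error",
    "rate": 0.001,
    "timestamp": "20220109-101500"
  }
}
```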
##### 9.1.1 - External Illumination (2021-11-16)

- NEW 9.1.1 bug fix: LED implementation

@@ -65,70 +111,7 @@ In other cases you can contact the developer via email: <img src="https://raw.gi

##### 8.5.0 - Multi Meter Support (2021-10-07)

* Upgrade digital CNN to v13.1.0 (added new images)
* Bug fix: WLAN password with space, double digit output

##### 8.4.0 - Multi Meter Support (2021-09-25)

* License change (removed MIT license, see remark below)
* HTML: show hostname in title and main page
* Configuration:
  * Moved setting `ExtendedResolution` to the individual number settings
  * New parameter `IgnoreLeadingNaN` (deletes leading NaNs specifically)
  * **ATTENTION**: an update of the `config.ini` is needed (open, adjust `ExtendedResolution`, save)
* Bug fixes (HTML, images of recognized numbers)

### **ATTENTION: LICENSE CHANGE - removal of MIT License.**

- Currently no license is published - copyright belongs to the author
- If you are interested in commercial usage or dedicated versions, please contact the developer
- No limits on private usage

##### 8.3.0 - Multi Meter Support (2021-09-12)

* Upgrade digital CNN to v12.1.0 (added new images)
* Dedicated NaN handling, internal refactoring (CNN handling)
* HTML: confirmation after config.ini update
* Bug fixes

##### 8.2.0 - Multi Meter Support (2021-08-24)

* Improved server responsiveness
* Flow status and prevalue status in overview
* Improved prevalue handling

##### 8.1.0 - Multi Meter Support (2021-08-12)

* GPIO: use the general MQTT main topic for GPIO
* Upgrade digital CNN to v12.0.0 (added new images)
* Update tfmicro to new master (2021-08-07)
* Bug fix: remove text in MQTT value, remove connect limit in WLAN reconnect

##### 8.0.5 - Multi Meter Support (2021-08-01)

* NEW 8.0.5: bug fix: saving prevalue
* NEW 8.0.4: bug fix: load config.ini after upgrade
* NEW 8.0.3: bug fix: reboot during `config.ini` handling, HTML error
* NEW 8.0.2: saving rounded prevalue, bug fix HTML server
* NEW 8.0.1: bug fix: HTML handling of the parameters `FixedExposure` and `ImageSize`
* Dual / multi meter support (more than one number to be recognized).
  This is implemented with the feature "number" on the ROI definition as well as selected options
* MQTT: standardization of the naming - including new topics (`json`, `freeMem`, `uptime`)
* Preparation for extended GPIO support (thanks to Zwerk2k) - not yet tested or fully functional
* Bug fixes: HTML server, memory leak, MQTT connect, hostname, turning off the flash LED

<span style="color: red;">**ATTENTION: the configuration and prevalue files are modified automatically and will not be backward compatible!**</span>
## Additional ideas

@@ -141,9 +124,9 @@ There are some ideas and feature requests, which are not followed currently - mai

## History

##### 7.1.2 MQTT-Update - (2021-06-17)
##### 8.5.0 - Multi Meter Support (2021-10-07)

**7.0.1 MQTT-Update - (2021-05-13)**
##### 7.1.2 MQTT-Update - (2021-06-17)

##### 6.7.2 Image Processing in Memory - (2021-05-01)
code/.helper/copy-esp-idf.bat (new file, 3 lines)

@@ -0,0 +1,3 @@
copy "..\..\code\build\esp32cam-server-only.bin" "..\..\firmware\firmware.bin"
copy "..\..\code\build\bootloader\bootloader.bin" "..\..\firmware\bootloader.bin"
copy "..\..\code\build\partition_table\partition-table.bin" "..\..\firmware\partitions.bin"
@@ -1,63 +0,0 @@
#include "SmartLeds.h"

IsrCore SmartLed::_interruptCore = CoreCurrent;
intr_handle_t SmartLed::_interruptHandle = NULL;

SmartLed*& IRAM_ATTR SmartLed::ledForChannel( int channel ) {
    static SmartLed* table[8] = { nullptr };
    assert( channel < 8 );
    return table[ channel ];
}

void IRAM_ATTR SmartLed::interruptHandler(void*) {
    for (int channel = 0; channel != 8; channel++) {
        auto self = ledForChannel( channel );

        if ( RMT.int_st.val & (1 << (24 + channel ) ) ) { // tx_thr_event
            if ( self )
                self->copyRmtHalfBlock();
            RMT.int_clr.val |= 1 << ( 24 + channel );
        } else if ( RMT.int_st.val & ( 1 << (3 * channel ) ) ) { // tx_end
            if ( self )
                xSemaphoreGiveFromISR( self->_finishedFlag, nullptr );
            RMT.int_clr.val |= 1 << ( 3 * channel );
        }
    }
}

void IRAM_ATTR SmartLed::copyRmtHalfBlock() {
    int offset = detail::MAX_PULSES * _halfIdx;
    _halfIdx = !_halfIdx;
    int len = 3 - _componentPosition + 3 * ( _count - 1 );
    len = std::min( len, detail::MAX_PULSES / 8 );

    if ( !len ) {
        for ( int i = 0; i < detail::MAX_PULSES; i++) {
            RMTMEM.chan[ _channel ].data32[ i + offset ].val = 0;
        }
    }

    int i;
    for ( i = 0; i != len && _pixelPosition != _count; i++ ) {
        uint8_t val = _buffer[ _pixelPosition ].getGrb( _componentPosition );
        for ( int j = 0; j != 8; j++, val <<= 1 ) {
            int bit = val >> 7;
            int idx = i * 8 + offset + j;
            RMTMEM.chan[ _channel ].data32[ idx ].val = _bitToRmt[ bit & 0x01 ].value;
        }
        if ( _pixelPosition == _count - 1 && _componentPosition == 2 ) {
            RMTMEM.chan[ _channel ].data32[ i * 8 + offset + 7 ].duration1 =
                _timing.TRS / ( detail::RMT_DURATION_NS * detail::DIVIDER );
        }

        _componentPosition++;
        if ( _componentPosition == 3 ) {
            _componentPosition = 0;
            _pixelPosition++;
        }
    }

    for ( i *= 8; i != detail::MAX_PULSES; i++ ) {
        RMTMEM.chan[ _channel ].data32[ i + offset ].val = 0;
    }
}
code/SmartLeds.h (530 changed lines)
@@ -1,530 +0,0 @@
#pragma once

/*
 * A C++ driver for the WS2812 LEDs using the RMT peripheral on the ESP32.
 *
 * Jan "yaqwsx" Mrázek <email@honzamrazek.cz>
 *
 * Based on the work by Martin F. Falatic - https://github.com/FozzTexx/ws2812-demo
 */

/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <memory>
#include <cassert>
#include <cstring>

#if defined ( ARDUINO )
    extern "C" { // ...someone forgot to put in the includes...
        #include "esp32-hal.h"
        #include "esp_intr_alloc.h"
        #include "esp_ipc.h"
        #include "driver/gpio.h"
        #include "driver/periph_ctrl.h"
        #include "freertos/semphr.h"
        #include "soc/rmt_struct.h"
        #include <driver/spi_master.h>
        #include "esp_idf_version.h"
        #if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL( 4, 0, 0 )
            #include "soc/dport_reg.h"
        #endif
    }
#elif defined ( ESP_PLATFORM )
    extern "C" { // ...someone forgot to put in the includes...
        #include <esp_intr_alloc.h>
        #include <esp_ipc.h>
        #include <driver/gpio.h>
        #include <freertos/FreeRTOS.h>
        #include <freertos/semphr.h>
        #include <soc/dport_reg.h>
        #include <soc/gpio_sig_map.h>
        #include <soc/rmt_struct.h>
        #include <driver/spi_master.h>
    }
    #include <stdio.h>
#endif

#include "Color.h"

namespace detail {

struct TimingParams {
    uint32_t T0H;
    uint32_t T1H;
    uint32_t T0L;
    uint32_t T1L;
    uint32_t TRS;
};

union RmtPulsePair {
    struct {
        int duration0:15;
        int level0:1;
        int duration1:15;
        int level1:1;
    };
    uint32_t value;
};

static const int DIVIDER = 4; // 8 still seems to work, but timings become marginal
static const int MAX_PULSES = 32; // A channel has a 64 "pulse" buffer - we use half per pass
static const double RMT_DURATION_NS = 12.5; // minimum time of a single RMT duration based on clock ns

} // namespace detail

using LedType = detail::TimingParams;

static const LedType LED_WS2812 = { 350, 700, 800, 600, 50000 };
static const LedType LED_WS2812B = { 400, 850, 850, 400, 50100 };
static const LedType LED_SK6812 = { 300, 600, 900, 600, 80000 };
static const LedType LED_WS2813 = { 350, 800, 350, 350, 300000 };

enum BufferType { SingleBuffer = 0, DoubleBuffer };

enum IsrCore { CoreFirst = 0, CoreSecond = 1, CoreCurrent = 2};

class SmartLed {
public:
    // The RMT interrupt must not run on the same core as WiFi interrupts, otherwise SmartLeds
    // can't fill the RMT buffer fast enough, resulting in rendering artifacts.
    // Usually, that means you have to set isrCore == CoreSecond.
    //
    // If you use anything other than CoreCurrent, the FreeRTOS scheduler MUST be already running,
    // so you can't use it if you define SmartLed as global variable.
    SmartLed( const LedType& type, int count, int pin, int channel = 0, BufferType doubleBuffer = SingleBuffer, IsrCore isrCore = CoreCurrent)
        : _timing( type ),
          _channel( channel ),
          _count( count ),
          _firstBuffer( new Rgb[ count ] ),
          _secondBuffer( doubleBuffer ? new Rgb[ count ] : nullptr ),
          _finishedFlag( xSemaphoreCreateBinary() )
    {
        assert( channel >= 0 && channel < 8 );
        assert( ledForChannel( channel ) == nullptr );

        xSemaphoreGive( _finishedFlag );

        DPORT_SET_PERI_REG_MASK( DPORT_PERIP_CLK_EN_REG, DPORT_RMT_CLK_EN );
        DPORT_CLEAR_PERI_REG_MASK( DPORT_PERIP_RST_EN_REG, DPORT_RMT_RST );

        PIN_FUNC_SELECT( GPIO_PIN_MUX_REG[ pin ], 2 );
        gpio_set_direction( static_cast< gpio_num_t >( pin ), GPIO_MODE_OUTPUT );
        gpio_matrix_out( static_cast< gpio_num_t >( pin ), RMT_SIG_OUT0_IDX + _channel, 0, 0 );
        initChannel( _channel );

        RMT.tx_lim_ch[ _channel ].limit = detail::MAX_PULSES;
        RMT.int_ena.val |= 1 << ( 24 + _channel );
        RMT.int_ena.val |= 1 << ( 3 * _channel );

        _bitToRmt[ 0 ].level0 = 1;
        _bitToRmt[ 0 ].level1 = 0;
        _bitToRmt[ 0 ].duration0 = _timing.T0H / ( detail::RMT_DURATION_NS * detail::DIVIDER );
        _bitToRmt[ 0 ].duration1 = _timing.T0L / ( detail::RMT_DURATION_NS * detail::DIVIDER );

        _bitToRmt[ 1 ].level0 = 1;
        _bitToRmt[ 1 ].level1 = 0;
        _bitToRmt[ 1 ].duration0 = _timing.T1H / ( detail::RMT_DURATION_NS * detail::DIVIDER );
        _bitToRmt[ 1 ].duration1 = _timing.T1L / ( detail::RMT_DURATION_NS * detail::DIVIDER );

        if ( !anyAlive() ) {
            _interruptCore = isrCore;
            if(isrCore != CoreCurrent) {
                ESP_ERROR_CHECK(esp_ipc_call_blocking(isrCore, registerInterrupt, NULL));
            } else {
                registerInterrupt(NULL);
            }
        }

        ledForChannel( channel ) = this;
    }

    ~SmartLed() {
        ledForChannel( _channel ) = nullptr;
        if ( !anyAlive() ) {
            if(_interruptCore != CoreCurrent) {
                ESP_ERROR_CHECK(esp_ipc_call_blocking(_interruptCore, unregisterInterrupt, NULL));
            } else {
                unregisterInterrupt(NULL);
            }
        }
        vSemaphoreDelete( _finishedFlag );
    }

    Rgb& operator[]( int idx ) {
        return _firstBuffer[ idx ];
    }

    const Rgb& operator[]( int idx ) const {
        return _firstBuffer[ idx ];
    }

    void show() {
        _buffer = _firstBuffer.get();
        startTransmission();
        swapBuffers();
    }

    bool wait( TickType_t timeout = portMAX_DELAY ) {
        if( xSemaphoreTake( _finishedFlag, timeout ) == pdTRUE ) {
            xSemaphoreGive( _finishedFlag );
            return true;
        }
        return false;
    }

    int size() const {
        return _count;
    }

    Rgb *begin() { return _firstBuffer.get(); }
    const Rgb *begin() const { return _firstBuffer.get(); }
    const Rgb *cbegin() const { return _firstBuffer.get(); }

    Rgb *end() { return _firstBuffer.get() + _count; }
    const Rgb *end() const { return _firstBuffer.get() + _count; }
    const Rgb *cend() const { return _firstBuffer.get() + _count; }

private:
    static intr_handle_t _interruptHandle;
    static IsrCore _interruptCore;

    static void initChannel( int channel ) {
        RMT.apb_conf.fifo_mask = 1; //enable memory access, instead of FIFO mode.
        RMT.apb_conf.mem_tx_wrap_en = 1; //wrap around when hitting end of buffer
        RMT.conf_ch[ channel ].conf0.div_cnt = detail::DIVIDER;
        RMT.conf_ch[ channel ].conf0.mem_size = 1;
        RMT.conf_ch[ channel ].conf0.carrier_en = 0;
        RMT.conf_ch[ channel ].conf0.carrier_out_lv = 1;
        RMT.conf_ch[ channel ].conf0.mem_pd = 0;

        RMT.conf_ch[ channel ].conf1.rx_en = 0;
        RMT.conf_ch[ channel ].conf1.mem_owner = 0;
        RMT.conf_ch[ channel ].conf1.tx_conti_mode = 0; //loop back mode.
        RMT.conf_ch[ channel ].conf1.ref_always_on = 1; // use apb clock: 80M
        RMT.conf_ch[ channel ].conf1.idle_out_en = 1;
        RMT.conf_ch[ channel ].conf1.idle_out_lv = 0;
    }

    static void registerInterrupt(void *) {
        ESP_ERROR_CHECK(esp_intr_alloc( ETS_RMT_INTR_SOURCE, 0, interruptHandler, nullptr, &_interruptHandle));
    }

    static void unregisterInterrupt(void*) {
        esp_intr_free( _interruptHandle );
    }

    static SmartLed*& IRAM_ATTR ledForChannel( int channel );
    static void IRAM_ATTR interruptHandler( void* );

    void IRAM_ATTR copyRmtHalfBlock();

    void swapBuffers() {
        if ( _secondBuffer )
            _firstBuffer.swap( _secondBuffer );
    }

    void startTransmission() {
        // Invalid use of the library
        if( xSemaphoreTake( _finishedFlag, 0 ) != pdTRUE )
            abort();

        _pixelPosition = _componentPosition = _halfIdx = 0;
        copyRmtHalfBlock();
        if ( _pixelPosition < _count )
            copyRmtHalfBlock();

        RMT.conf_ch[ _channel ].conf1.mem_rd_rst = 1;
        RMT.conf_ch[ _channel ].conf1.tx_start = 1;
    }

    static bool anyAlive() {
        for ( int i = 0; i != 8; i++ )
            if ( ledForChannel( i ) != nullptr ) return true;
        return false;
    }

    const LedType& _timing;
    int _channel;
    detail::RmtPulsePair _bitToRmt[ 2 ];
    int _count;
    std::unique_ptr< Rgb[] > _firstBuffer;
    std::unique_ptr< Rgb[] > _secondBuffer;
    Rgb *_buffer;

    xSemaphoreHandle _finishedFlag;

    int _pixelPosition;
    int _componentPosition;
    int _halfIdx;
};

class Apa102 {
public:
    struct ApaRgb {
        ApaRgb( uint8_t r = 0, uint8_t g = 0, uint32_t b = 0, uint32_t v = 0xFF )
            : v( 0xE0 | v ), b( b ), g( g ), r( r )
        {}

        ApaRgb& operator=( const Rgb& o ) {
            r = o.r;
            g = o.g;
            b = o.b;
            return *this;
        }

        ApaRgb& operator=( const Hsv& o ) {
            *this = Rgb{ o };
            return *this;
        }

        uint8_t v, b, g, r;
    };

    static const int FINAL_FRAME_SIZE = 4;
    static const int TRANS_COUNT = 2 + 8;

    Apa102( int count, int clkpin, int datapin, BufferType doubleBuffer = SingleBuffer )
        : _count( count ),
          _firstBuffer( new ApaRgb[ count ] ),
          _secondBuffer( doubleBuffer ? new ApaRgb[ count ] : nullptr ),
          _initFrame( 0 )
    {
        spi_bus_config_t buscfg;
        memset( &buscfg, 0, sizeof( buscfg ) );
        buscfg.mosi_io_num = datapin;
        buscfg.miso_io_num = -1;
        buscfg.sclk_io_num = clkpin;
        buscfg.quadwp_io_num = -1;
        buscfg.quadhd_io_num = -1;
        buscfg.max_transfer_sz = 65535;

        spi_device_interface_config_t devcfg;
        memset( &devcfg, 0, sizeof( devcfg ) );
        devcfg.clock_speed_hz = 1000000;
        devcfg.mode = 0;
        devcfg.spics_io_num = -1;
        devcfg.queue_size = TRANS_COUNT;
        devcfg.pre_cb = nullptr;

        auto ret = spi_bus_initialize( HSPI_HOST, &buscfg, 1 );
        assert( ret == ESP_OK );

        ret = spi_bus_add_device( HSPI_HOST, &devcfg, &_spi );
        assert( ret == ESP_OK );

        std::fill_n( _finalFrame, FINAL_FRAME_SIZE, 0xFFFFFFFF );
    }

    ~Apa102() {
        // ToDo
    }

    ApaRgb& operator[]( int idx ) {
        return _firstBuffer[ idx ];
    }

    const ApaRgb& operator[]( int idx ) const {
        return _firstBuffer[ idx ];
    }

    void show() {
        _buffer = _firstBuffer.get();
        startTransmission();
        swapBuffers();
    }

    void wait() {
        for ( int i = 0; i != _transCount; i++ ) {
            spi_transaction_t *t;
            spi_device_get_trans_result( _spi, &t, portMAX_DELAY );
        }
    }
private:
    void swapBuffers() {
        if ( _secondBuffer )
            _firstBuffer.swap( _secondBuffer );
    }

    void startTransmission() {
        for ( int i = 0; i != TRANS_COUNT; i++ ) {
            _transactions[ i ].cmd = 0;
            _transactions[ i ].addr = 0;
            _transactions[ i ].flags = 0;
            _transactions[ i ].rxlength = 0;
            _transactions[ i ].rx_buffer = nullptr;
        }
        // Init frame
        _transactions[ 0 ].length = 32;
        _transactions[ 0 ].tx_buffer = &_initFrame;
        spi_device_queue_trans( _spi, _transactions + 0, portMAX_DELAY );
        // Data
        _transactions[ 1 ].length = 32 * _count;
        _transactions[ 1 ].tx_buffer = _buffer;
        spi_device_queue_trans( _spi, _transactions + 1, portMAX_DELAY );
        _transCount = 2;
        // End frame
        for ( int i = 0; i != 1 + _count / 32 / FINAL_FRAME_SIZE; i++ ) {
            _transactions[ 2 + i ].length = 32 * FINAL_FRAME_SIZE;
            _transactions[ 2 + i ].tx_buffer = _finalFrame;
            spi_device_queue_trans( _spi, _transactions + 2 + i, portMAX_DELAY );
            _transCount++;
        }
    }

    spi_device_handle_t _spi;
    int _count;
    std::unique_ptr< ApaRgb[] > _firstBuffer, _secondBuffer;
    ApaRgb *_buffer;

    spi_transaction_t _transactions[ TRANS_COUNT ];
    int _transCount;

    uint32_t _initFrame;
    uint32_t _finalFrame[ FINAL_FRAME_SIZE ];
};

class LDP8806 {
public:
    struct LDP8806_GRB {

        LDP8806_GRB( uint8_t g_7bit = 0, uint8_t r_7bit = 0, uint32_t b_7bit = 0 )
            : g( g_7bit ), r( r_7bit ), b( b_7bit )
        {
        }

        LDP8806_GRB& operator=( const Rgb& o ) {
            //Convert 8->7bit colour
            r = ( o.r * 127 / 256 ) | 0x80;
            g = ( o.g * 127 / 256 ) | 0x80;
            b = ( o.b * 127 / 256 ) | 0x80;
            return *this;
        }

        LDP8806_GRB& operator=( const Hsv& o ) {
            *this = Rgb{ o };
            return *this;
        }

        uint8_t g, r, b;
    };

    static const int LED_FRAME_SIZE_BYTES = sizeof( LDP8806_GRB );
    static const int LATCH_FRAME_SIZE_BYTES = 3;
    static const int TRANS_COUNT_MAX = 20; // Arbitrary, supports up to 600 LEDs

    LDP8806( int count, int clkpin, int datapin, BufferType doubleBuffer = SingleBuffer, uint32_t clock_speed_hz = 2000000 )
        : _count( count ),
          _firstBuffer( new LDP8806_GRB[ count ] ),
          _secondBuffer( doubleBuffer ? new LDP8806_GRB[ count ] : nullptr ),
          // one 'latch'/start-of-data mark frame for every 32 leds
          _latchFrames( ( count + 31 ) / 32 )
    {
        spi_bus_config_t buscfg;
        memset( &buscfg, 0, sizeof( buscfg ) );
        buscfg.mosi_io_num = datapin;
        buscfg.miso_io_num = -1;
        buscfg.sclk_io_num = clkpin;
        buscfg.quadwp_io_num = -1;
        buscfg.quadhd_io_num = -1;
        buscfg.max_transfer_sz = 65535;

        spi_device_interface_config_t devcfg;
        memset( &devcfg, 0, sizeof( devcfg ) );
        devcfg.clock_speed_hz = clock_speed_hz;
        devcfg.mode = 0;
        devcfg.spics_io_num = -1;
        devcfg.queue_size = TRANS_COUNT_MAX;
        devcfg.pre_cb = nullptr;

        auto ret = spi_bus_initialize( HSPI_HOST, &buscfg, 1 );
        assert( ret == ESP_OK );

        ret = spi_bus_add_device( HSPI_HOST, &devcfg, &_spi );
        assert( ret == ESP_OK );

        std::fill_n( _latchBuffer, LATCH_FRAME_SIZE_BYTES, 0x0 );
    }

    ~LDP8806() {
        // noop
    }

    LDP8806_GRB& operator[]( int idx ) {
        return _firstBuffer[ idx ];
    }

    const LDP8806_GRB& operator[]( int idx ) const {
        return _firstBuffer[ idx ];
    }

    void show() {
        _buffer = _firstBuffer.get();
        startTransmission();
        swapBuffers();
    }

    void wait() {
        while ( _transCount-- ) {
            spi_transaction_t *t;
            spi_device_get_trans_result( _spi, &t, portMAX_DELAY );
        }
    }
private:
    void swapBuffers() {
        if ( _secondBuffer )
            _firstBuffer.swap( _secondBuffer );
    }

    void startTransmission() {
        _transCount = 0;
        for ( int i = 0; i != TRANS_COUNT_MAX; i++ ) {
            _transactions[ i ].cmd = 0;
            _transactions[ i ].addr = 0;
            _transactions[ i ].flags = 0;
            _transactions[ i ].rxlength = 0;
            _transactions[ i ].rx_buffer = nullptr;
        }
        // LED Data
        _transactions[ 0 ].length = ( LED_FRAME_SIZE_BYTES * 8 ) * _count;
        _transactions[ 0 ].tx_buffer = _buffer;
        spi_device_queue_trans( _spi, _transactions + _transCount, portMAX_DELAY );
        _transCount++;

        // 'latch'/start-of-data marker frames
        for ( int i = 0; i < _latchFrames; i++ ) {
            _transactions[ _transCount ].length = ( LATCH_FRAME_SIZE_BYTES * 8 );
            _transactions[ _transCount ].tx_buffer = _latchBuffer;
            spi_device_queue_trans( _spi, _transactions + _transCount, portMAX_DELAY );
            _transCount++;
        }
    }

    spi_device_handle_t _spi;
    int _count;
    std::unique_ptr< LDP8806_GRB[] > _firstBuffer, _secondBuffer;
    LDP8806_GRB *_buffer;

    spi_transaction_t _transactions[ TRANS_COUNT_MAX ];
    int _transCount;

    int _latchFrames;
    uint8_t _latchBuffer[ LATCH_FRAME_SIZE_BYTES ];
};
@@ -74,7 +74,7 @@ static camera_config_t camera_config = {

    //XCLK 20MHz or 10MHz for OV2640 double FPS (Experimental)
    // .xclk_freq_hz = 20000000,   // original value
    .xclk_freq_hz = 5000000,       // test to get rid of the image errors !!!!
    .xclk_freq_hz = 5000000,       // test to get rid of the image errors !!!!
    .ledc_timer = LEDC_TIMER_0,
    .ledc_channel = LEDC_CHANNEL_0,

@@ -86,6 +86,8 @@ static camera_config_t camera_config = {

    .jpeg_quality = 5, //0-63 lower number means higher quality
    .fb_count = 1      //if more than one, i2s runs in continuous mode. Use only with JPEG
    // .grab_mode = CAMERA_GRAB_WHEN_EMPTY,

};

@@ -279,13 +281,23 @@ esp_err_t CCamera::CaptureToBasisImage(CImageBasis *_Image, int delay)

        ESP_LOGE(TAGCAMERACLASS, "CaptureToBasisImage: Camera Capture Failed");
        LEDOnOff(false);
        LightOnOff(false);
        doReboot();

        LogFile.SwitchOnOff(true);
        LogFile.WriteToFile("Camera is not working anymore - most probably a hardware problem (instability, ...). "
                            "System will reboot.");
        doReboot();

        return ESP_FAIL;
    }

    int _size = fb->len;
    zwischenspeicher = (uint8_t*) malloc(_size);
    if (!zwischenspeicher)
    {
        ESP_LOGE(TAGCAMERACLASS, "Not enough memory for the image in function CaptureToBasisImage()");
        LogFile.SwitchOnOff(true);
        LogFile.WriteToFile("Not enough memory for the image in function CaptureToBasisImage()");
    }
    for (int i = 0; i < _size; ++i)
        *(zwischenspeicher + i) = *(fb->buf + i);
    esp_camera_fb_return(fb);

@@ -11,11 +11,48 @@

#include <stdint.h>
#include <stdbool.h>

#define OV9650_PID (0x96)
#define OV7725_PID (0x77)
#define OV2640_PID (0x26)
#define OV3660_PID (0x36)
#define OV5640_PID (0x56)
#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
    OV9650_PID = 0x96,
    OV7725_PID = 0x77,
    OV2640_PID = 0x26,
    OV3660_PID = 0x3660,
    OV5640_PID = 0x5640,
    OV7670_PID = 0x76,
    NT99141_PID = 0x1410,
    GC2145_PID = 0x2145,
    GC032A_PID = 0x232a,
    GC0308_PID = 0x9b,
} camera_pid_t;

typedef enum {
    CAMERA_OV7725,
    CAMERA_OV2640,
    CAMERA_OV3660,
    CAMERA_OV5640,
    CAMERA_OV7670,
    CAMERA_NT99141,
    CAMERA_GC2145,
    CAMERA_GC032A,
    CAMERA_GC0308,
    CAMERA_MODEL_MAX,
    CAMERA_NONE,
} camera_model_t;

typedef enum {
    OV2640_SCCB_ADDR  = 0x30,  // 0x60 >> 1
    OV5640_SCCB_ADDR  = 0x3C,  // 0x78 >> 1
    OV3660_SCCB_ADDR  = 0x3C,  // 0x78 >> 1
    OV7725_SCCB_ADDR  = 0x21,  // 0x42 >> 1
    OV7670_SCCB_ADDR  = 0x21,  // 0x42 >> 1
    NT99141_SCCB_ADDR = 0x2A,  // 0x54 >> 1
    GC2145_SCCB_ADDR  = 0x3C,  // 0x78 >> 1
    GC032A_SCCB_ADDR  = 0x21,  // 0x42 >> 1
    GC0308_SCCB_ADDR  = 0x21,  // 0x42 >> 1
} camera_sccb_addr_t;

typedef enum {
    PIXFORMAT_RGB565,    // 2BPP/RGB565

@@ -56,6 +93,15 @@ typedef enum {
    FRAMESIZE_INVALID
} framesize_t;

typedef struct {
    const camera_model_t model;
    const char *name;
    const camera_sccb_addr_t sccb_addr;
    const camera_pid_t pid;
    const framesize_t max_size;
    const bool support_jpeg;
} camera_sensor_info_t;

typedef enum {
    ASPECT_RATIO_4X3,
    ASPECT_RATIO_3X2,

@@ -99,11 +145,13 @@ typedef struct {

// Resolution table (in sensor.c)
extern const resolution_info_t resolution[];
// camera sensor table (in sensor.c)
extern const camera_sensor_info_t camera_sensor[];

typedef struct {
    uint8_t MIDH;
    uint8_t MIDL;
    uint8_t PID;
    uint16_t PID;
    uint8_t VER;
} sensor_id_t;

@@ -188,4 +236,10 @@ typedef struct _sensor {
    int (*set_xclk) (sensor_t *sensor, int timer, int xclk);
} sensor_t;

camera_sensor_info_t *esp_camera_sensor_get_info(sensor_id_t *id);

#ifdef __cplusplus
}
#endif

#endif /* __SENSOR_H__ */
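For context, a hypothetical use of the esp_camera_sensor_get_info() helper declared above (esp_camera_sensor_get() is assumed to come from the esp32-camera driver):

```cpp
#include <stdio.h>
#include "esp_camera.h"

// Look up the probed sensor in the camera_sensor[] table and report JPEG support.
static void print_sensor_info(void)
{
    sensor_t *s = esp_camera_sensor_get();
    if (s == NULL) return;
    camera_sensor_info_t *info = esp_camera_sensor_get_info(&s->id);
    if (info != NULL && info->support_jpeg) {
        printf("Camera %s supports JPEG up to framesize %d\n", info->name, (int)info->max_size);
    }
}
```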
@@ -8,8 +8,8 @@

#include "ClassLogFile.h"

#define SCRATCH_BUFSIZE2 8192
char scratch2[SCRATCH_BUFSIZE2];
// #define SCRATCH_BUFSIZE2 8192
// char scratch2[SCRATCH_BUFSIZE2];

//#define DEBUG_DETAIL_ON
static const char *TAGPARTCAMERA = "server_camera";

@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
                    INCLUDE_DIRS "." "../../include"
                    REQUIRES tfmicro esp_http_server app_update esp_http_client nvs_flash jomjol_tfliteclass jomjol_flowcontroll spiffs jomjol_helper jomjol_controlGPIO)
                    REQUIRES tflite-lib esp_http_server app_update esp_http_client nvs_flash jomjol_tfliteclass jomjol_flowcontroll spiffs jomjol_helper jomjol_controlGPIO)

@@ -17,7 +17,14 @@

#include <sys/param.h>
#include <sys/unistd.h>
#include <sys/stat.h>

#ifdef __cplusplus
extern "C" {
#endif
#include <dirent.h>
#ifdef __cplusplus
}
#endif

#include "esp_err.h"
#include "esp_log.h"

@@ -58,6 +65,52 @@ struct file_server_data {

static const char *TAG_FILESERVER = "file_server";

#include <iostream>
#include <sys/types.h>
#include <dirent.h>

using namespace std;

static esp_err_t get_tflite_file_handler(httpd_req_t *req){
    DIR *verzeichnis;
    struct dirent *files;

    std::string _filename, _fileext, _result = "";
    std::string _delimiter = ".";
    size_t pos = 0;

    verzeichnis = opendir("/sdcard/config");

    printf("Searching for TFLITE in /sdcard/config\n");

    while((files = readdir(verzeichnis)))
    {
        _filename = files->d_name;
        _fileext = _filename;
        printf("File: %s\t", _filename.c_str());

        while ((pos = _fileext.find(_delimiter))) {
            _fileext.erase(0, pos + _delimiter.length());
        }

        printf(" Extension: %s\n", _fileext.c_str());

        if ((_fileext == "tfl") || (_fileext == "tflite"))
        {
            _result = _result + _filename + "\t";
        }
    }
    closedir(verzeichnis);

    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
    httpd_resp_set_type(req, "text/plain");
    httpd_resp_sendstr_chunk(req, _result.c_str());
    httpd_resp_sendstr_chunk(req, NULL);
    return ESP_OK;
}

/* Handler to redirect incoming GET request for /index.html to /
 * This can be overridden by uploading file with same name */
// static esp_err_t index_html_get_handler(httpd_req_t *req)

@@ -797,4 +850,15 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path)
    };
    httpd_register_uri_handler(server, &file_delete);

    /* URI handler for getting tflite files from server */
    /*
    httpd_uri_t file_tflite = {
        .uri       = "/tflite",   // Match all URIs of type /delete/path/to/file
        .method    = HTTP_GET,
        .handler   = get_tflite_file_handler,
        .user_ctx  = server_data  // Pass server data as context
    };
    httpd_register_uri_handler(server, &file_tflite);
    */
}

@@ -5,7 +5,14 @@

#include <sys/param.h>
#include <sys/unistd.h>
#include <sys/stat.h>

#ifdef __cplusplus
extern "C" {
#endif
#include <dirent.h>
#ifdef __cplusplus
}
#endif

#include "esp_err.h"
#include "esp_log.h"

@@ -30,6 +30,7 @@

#include "server_file.h"
#include "server_GPIO.h"

#include "ClassLogFile.h"

#include "Helper.h"

@@ -6,7 +6,15 @@

#include "freertos/task.h"

#include <sys/stat.h>

#ifdef __cplusplus
extern "C" {
#endif
#include <dirent.h>
#ifdef __cplusplus
}
#endif

#include "ClassLogFile.h"
#include "time_sntp.h"
#include "Helper.h"

@@ -624,4 +632,37 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
    }

    return result;
}
}

string ClassFlowControll::getJSON()
{
    std::vector<NumberPost*>* NUMBERS = flowpostprocessing->GetNumbers();

    std::string json = "{\n";

    for (int i = 0; i < (*NUMBERS).size(); ++i)
    {
        json += "\"" + (*NUMBERS)[i]->name + "\":\n";
        json += " {\n";
        if ((*NUMBERS)[i]->ReturnValueNoError.length() > 0)
            json += " \"value\": " + (*NUMBERS)[i]->ReturnValueNoError + ",\n";
        else
            json += " \"value\": \"\",\n";
        json += " \"raw\": \"" + (*NUMBERS)[i]->ReturnRawValue + "\",\n";
        json += " \"error\": \"" + (*NUMBERS)[i]->ErrorMessageText + "\",\n";
        if ((*NUMBERS)[i]->ReturnRateValue.length() > 0)
            json += " \"rate\": " + (*NUMBERS)[i]->ReturnRateValue + ",\n";
        else
            json += " \"rate\": \"\",\n";

        json += " \"timestamp\": \"" + (*NUMBERS)[i]->timeStamp + "\"\n";
        if ((i+1) < (*NUMBERS).size())
            json += " },\n";
        else
            json += " }\n";
    }
    json += "}";

    return json;
}
@@ -48,6 +48,7 @@ public:
    string UpdatePrevalue(std::string _newvalue, std::string _numbers, bool _extern);
    string GetPrevalue(std::string _number = "");
    bool ReadParameter(FILE* pfile, string& aktparamgraph);
    string getJSON();

    string TranslateAktstatus(std::string _input);

@@ -29,6 +29,7 @@ struct NumberPost {
    float FlowRateAct;          // m3 / min
    float PreValue;             // last value that was read out successfully
    float Value;                // last value read out, incl. corrections
    string ReturnRateValue;     // return value of the rate
    string ReturnRawValue;      // raw value (with N and leading zeros)
    string ReturnValue;         // corrected return value, possibly with error message
    string ReturnPreValue;      // corrected return value without error message

@@ -2,7 +2,15 @@

#include <string>
#include <string.h>
#include <sys/stat.h>

#ifdef __cplusplus
extern "C" {
#endif
#include <dirent.h>
#ifdef __cplusplus
}
#endif

#include "time_sntp.h"
#include "ClassLogFile.h"
#include "CImageBasis.h"

@@ -135,6 +135,7 @@ bool ClassFlowMQTT::doFlow(string zwtime)

    std::string result;
    std::string resulterror = "";
    std::string resultraw = "";
    std::string resultrate = "";
    std::string resulttimestamp = "";
    string zw = "";

@@ -159,8 +160,9 @@ bool ClassFlowMQTT::doFlow(string zwtime)
    for (int i = 0; i < (*NUMBERS).size(); ++i)
    {
        result = (*NUMBERS)[i]->ReturnValueNoError;
        resultraw = (*NUMBERS)[i]->ReturnRawValue;
        resulterror = (*NUMBERS)[i]->ErrorMessageText;
        resultrate = std::to_string((*NUMBERS)[i]->FlowRateAct);
        resultrate = (*NUMBERS)[i]->ReturnRateValue;
        resulttimestamp = (*NUMBERS)[i]->timeStamp;

        namenumber = (*NUMBERS)[i]->name;

@@ -169,22 +171,40 @@ bool ClassFlowMQTT::doFlow(string zwtime)
        else
            namenumber = maintopic + "/" + namenumber + "/";

        zw = namenumber + "value";
        MQTTPublish(zw, result);
        zw = namenumber + "value";
        if (result.length() > 0)
            MQTTPublish(zw, result);

        zw = namenumber + "error";
        MQTTPublish(zw, resulterror, 1);
        zw = namenumber + "error";
        if (resulterror.length() > 0)
            MQTTPublish(zw, resulterror, 1);

        zw = namenumber + "rate";
        MQTTPublish(zw, resultrate);
        zw = namenumber + "rate";
        if (resultrate.length() > 0)
            MQTTPublish(zw, resultrate);

        zw = namenumber + "raw";
        if (resultraw.length() > 0)
            MQTTPublish(zw, resultraw);

        zw = namenumber + "timestamp";
        MQTTPublish(zw, resulttimestamp);
        if (resulttimestamp.length() > 0)
            MQTTPublish(zw, resulttimestamp);

        std::string json="{\"value\":"+result;
        json += ",\"error\":\""+resulterror;
        json += "\",\"rate\":"+resultrate;
        std::string json = "";

        if (result.length() > 0)
            json += "{\"value\":"+result;
        else
            json += "{\"value\":\"\"";

        json += ",\"raw\":\""+resultraw;
        json += "\",\"error\":\""+resulterror;
        if (resultrate.length() > 0)
            json += "\",\"rate\":"+resultrate;
        else
            json += "\",\"rate\":\"\"";
        json += ",\"timestamp\":\""+resulttimestamp+"\"}";

        zw = namenumber + "json";

@@ -117,6 +117,7 @@ bool ClassFlowPostProcessing::LoadPreValue(void)
        else
        {
            NUMBERS[j]->PreValueOkay = true;
            /*
            NUMBERS[j]->Value = NUMBERS[j]->PreValue;
            NUMBERS[j]->ReturnValue = to_string(NUMBERS[j]->Value);
            NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;

@@ -126,6 +127,7 @@ bool ClassFlowPostProcessing::LoadPreValue(void)
            NUMBERS[j]->ReturnValue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma + 1); // one extra digit to be safe, since Extended Resolution may be enabled (it is only set during the first run)
            NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;
            }
            */
        }

    }

@@ -656,7 +658,12 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
            zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma);
        }

        if (NUMBERS[j]->useMaxRateValue && ((abs(NUMBERS[j]->Value - NUMBERS[j]->PreValue) > NUMBERS[j]->MaxRateValue)))
        double difference = difftime(imagetime, NUMBERS[j]->lastvalue);   // in seconds
        difference /= 60;                                                 // in minutes
        NUMBERS[j]->FlowRateAct = (NUMBERS[j]->Value - NUMBERS[j]->PreValue) / difference;
        NUMBERS[j]->ReturnRateValue = std::to_string(NUMBERS[j]->FlowRateAct);

        if (NUMBERS[j]->useMaxRateValue && (abs(NUMBERS[j]->FlowRateAct) > NUMBERS[j]->MaxRateValue))
        {
            NUMBERS[j]->ErrorMessageText = NUMBERS[j]->ErrorMessageText + "Rate too high - Read: " + RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma) + " - Pre: " + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
            NUMBERS[j]->Value = NUMBERS[j]->PreValue;

@@ -668,19 +675,24 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
        if (NUMBERS[j]->ErrorMessage && (NUMBERS[j]->ErrorMessageText.length() > 0))
            NUMBERS[j]->ReturnValue = NUMBERS[j]->ReturnValue + "\t" + NUMBERS[j]->ErrorMessageText;

        double difference = difftime(imagetime, NUMBERS[j]->lastvalue);   // in seconds
        difference /= 60;                                                 // in minutes
        NUMBERS[j]->FlowRateAct = (NUMBERS[j]->Value - NUMBERS[j]->PreValue) / difference;
        NUMBERS[j]->lastvalue = imagetime;

        if (NUMBERS[j]->ErrorMessageText.length() == 0)
        {
            NUMBERS[j]->lastvalue = imagetime;
            NUMBERS[j]->PreValue = NUMBERS[j]->Value;

            NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;
            NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
            NUMBERS[j]->ErrorMessageText = "no error";
            UpdatePreValueINI = true;
        }
        else
        {
            NUMBERS[j]->ReturnRateValue = "";
            NUMBERS[j]->ReturnValue = "";
            NUMBERS[j]->ReturnValueNoError = "";
            NUMBERS[j]->timeStamp = "";

        }
    }
    string _zw = "PostProcessing - Raw: " + NUMBERS[j]->ReturnRawValue + " Value: " + NUMBERS[j]->ReturnValue + " Error: " + NUMBERS[j]->ErrorMessageText;
    LogFile.WriteToFile(_zw);
@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
                    INCLUDE_DIRS "."
                    REQUIRES tfmicro jomjol_logfile)
                    REQUIRES tflite-lib jomjol_logfile)

@@ -6,7 +6,15 @@

#include "Helper.h"
#include <sys/types.h>
#include <sys/stat.h>

#ifdef __cplusplus
extern "C" {
#endif
#include <dirent.h>
#ifdef __cplusplus
}
#endif

#include <string.h>
#include <esp_log.h>

@@ -1,6 +1,7 @@

#include "CImageBasis.h"
#include "Helper.h"
#include "ClassLogFile.h"
#include "server_ota.h"

#include <esp_log.h>

@@ -337,6 +338,18 @@ void CImageBasis::LoadFromMemory(stbi_uc *_buffer, int len)
    rgb_image = stbi_load_from_memory(_buffer, len, &width, &height, &channels, 3);
    bpp = channels;
    printf("Image loaded from memory: %d, %d, %d\n", width, height, channels);
    if ((width * height * channels) == 0)
    {
        ESP_LOGE(TAG, "Image with size 0 loaded --> reboot to be done! "
                      "Check that your camera module is working and connected properly.");

        LogFile.SwitchOnOff(true);
        LogFile.WriteToFile("Image with size 0 loaded --> reboot to be done! "
                            "Check that your camera module is working and connected properly.");

        doReboot();

    }
    RGBImageRelease();
}

@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
                    INCLUDE_DIRS "."
                    REQUIRES jomjol_helper jomjol_logfile esp_http_server)
                    REQUIRES jomjol_helper jomjol_logfile esp_http_server jomjol_fileserver_ota)

@@ -3,7 +3,15 @@

#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>

#ifdef __cplusplus
extern "C" {
#endif
#include <dirent.h>
#ifdef __cplusplus
}
#endif

#include "Helper.h"

static const char *TAG = "log";

@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
                    INCLUDE_DIRS "."
                    REQUIRES tfmicro mqtt jomjol_logfile)
                    REQUIRES tflite-lib mqtt jomjol_logfile)

@@ -236,14 +236,13 @@ bool CTfLiteClass::LoadModel(std::string _fn){
    this->error_reporter = new tflite::MicroErrorReporter;
#endif

    unsigned char *rd;
    rd = ReadFileToCharArray(_fn.c_str());
    modelload = ReadFileToCharArray(_fn.c_str());

    if (rd == NULL)
    if (modelload == NULL)
        return false;

    this->model = tflite::GetModel(rd);
    free(rd);
    model = tflite::GetModel(modelload);
    // free(rd);
    TFLITE_MINIMAL_CHECK(model != nullptr);

    return true;

@@ -266,6 +265,8 @@ CTfLiteClass::~CTfLiteClass()
    delete this->tensor_arena;
    delete this->interpreter;
    delete this->error_reporter;
    if (modelload)
        free(modelload);
}

@@ -46,6 +46,9 @@ class CTfLiteClass
    int kTensorArenaSize;
    uint8_t *tensor_arena;

    unsigned char *modelload = NULL;

    float* input;
    int input_i;
    int im_height, im_width, im_channel;

@@ -189,6 +189,36 @@ esp_err_t handler_doflow(httpd_req_t *req)
};

esp_err_t handler_json(httpd_req_t *req)
{
#ifdef DEBUG_DETAIL_ON
    LogFile.WriteHeapInfo("handler_json - Start");
#endif

    printf("handler_JSON uri:\n"); printf(req->uri); printf("\n");

    char _query[100];
    char _size[10];

    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
    httpd_resp_set_type(req, "application/json");

    std::string zw = tfliteflow.getJSON();
    if (zw.length() > 0)
        httpd_resp_sendstr_chunk(req, zw.c_str());

    string query = std::string(_query);

    /* Respond with an empty chunk to signal HTTP response completion */
    httpd_resp_sendstr_chunk(req, NULL);

#ifdef DEBUG_DETAIL_ON
    LogFile.WriteHeapInfo("handler_JSON - Done");
#endif
    return ESP_OK;
};

esp_err_t handler_wasserzaehler(httpd_req_t *req)

@@ -664,7 +694,23 @@ void task_autodoFlow(void *pvParameter)

void TFliteDoAutoStart()
{
    xTaskCreate(&task_autodoFlow, "task_autodoFlow", configMINIMAL_STACK_SIZE * 64, NULL, tskIDLE_PRIORITY+1, &xHandletask_autodoFlow);
    BaseType_t xReturned;

    int _i = configMINIMAL_STACK_SIZE;

    printf("task_autodoFlow configMINIMAL_STACK_SIZE: %d\n", _i);
    printf("getESPHeapInfo: %s\n", getESPHeapInfo().c_str());

    xReturned = xTaskCreate(&task_autodoFlow, "task_autodoFlow", configMINIMAL_STACK_SIZE * 60, NULL, tskIDLE_PRIORITY+1, &xHandletask_autodoFlow);
    if( xReturned != pdPASS )
    {
        //Memory: 64 --> 48 --> 35 --> 25
        printf("ERROR task_autodoFlow could not be created !!\r\n");
    }
    printf("getESPHeapInfo: %s\n", getESPHeapInfo().c_str());

}

std::string GetMQTTMainTopic()

@@ -710,4 +756,10 @@ void register_server_tflite_uri(httpd_handle_t server)
    camuri.handler = handler_wasserzaehler;
    camuri.user_ctx = (void*) "Wasserzaehler";
    httpd_register_uri_handler(server, &camuri);

    camuri.uri = "/json";
    camuri.handler = handler_json;
    camuri.user_ctx = (void*) "JSON";
    httpd_register_uri_handler(server, &camuri);

}

@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
                    INCLUDE_DIRS "."
                    REQUIRES tfmicro jomjol_logfile)
                    REQUIRES tflite-lib jomjol_logfile)
code/components/tflite-lib/CMakeLists.txt (new file, 50 lines)
@@ -0,0 +1,50 @@
cmake_minimum_required(VERSION 3.5)

set(tflite_dir "${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/lite")
set(tfmicro_dir "${tflite_dir}/micro")
set(tfmicro_frontend_dir "${tflite_dir}/experimental/microfrontend/lib")
set(tfmicro_kernels_dir "${tfmicro_dir}/kernels")

file(GLOB srcs_micro
"${tfmicro_dir}/*.cc"
"${tfmicro_dir}/*.c")

file(GLOB src_micro_frontend
"${tfmicro_frontend_dir}/*.c"
"${tfmicro_frontend_dir}/*.cc")
file(GLOB srcs_kernels
"${tfmicro_kernels_dir}/*.c"
"${tfmicro_kernels_dir}/*.cc")

set(lib_srcs
"${srcs_micro}"
"${srcs_kernels}"
"${src_micro_frontend}"
"${tflite_dir}/kernels/kernel_util.cc"
"${tflite_dir}/micro/memory_planner/greedy_memory_planner.cc"
"${tflite_dir}/micro/memory_planner/linear_memory_planner.cc"
"${tflite_dir}/c/common.c"
"${tflite_dir}/core/api/error_reporter.cc"
"${tflite_dir}/core/api/flatbuffer_conversions.cc"
"${tflite_dir}/core/api/op_resolver.cc"
"${tflite_dir}/core/api/tensor_utils.cc"
"${tflite_dir}/kernels/internal/quantization_util.cc"
"${tflite_dir}/schema/schema_utils.cc")

idf_component_register(
SRCS "${lib_srcs}"
INCLUDE_DIRS "." "third_party/gemmlowp"
"third_party/flatbuffers/include"
"third_party/ruy"
"third_party/kissfft")

# Reduce the level of paranoia to be able to compile TF sources
target_compile_options(${COMPONENT_LIB} PRIVATE
-Wno-maybe-uninitialized
-Wno-missing-field-initializers
-Wno-type-limits)

target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP -DESP_NN -Wno-nonnull -Wno-nonnull -Wno-nonnull)
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP -DESP_NN -Wno-return-type -Wno-strict-aliasing -std=gnu++14 -Wno-return-type -Wno-strict-aliasing -std=gnu++14 -Wno-return-type -Wno-strict-aliasing -std=gnu++14 >)
target_compile_options(${COMPONENT_LIB} INTERFACE $<$<IN_LIST:-DTF_LITE_STATIC_MEMORY,$<TARGET_PROPERTY:${COMPONENT_LIB},COMPILE_OPTIONS>>:-DTF_LITE_STATIC_MEMORY>)
target_link_libraries(${COMPONENT_LIB} PRIVATE -lm)
@@ -502,6 +502,22 @@ typedef struct {
const char* shared_name;
} TfLiteVarHandleParams;

typedef struct {
int seed;
int seed2;
} TfLiteRandomParams;

typedef struct {
int num_boundaries;
// This points to the memory stored in the model (flatbuffer),
// and is not owned.
const float* boundaries;
} TfLiteBucketizeParams;

typedef struct {
bool approximate;
} TfLiteGeluParams;

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
@@ -43,6 +43,9 @@ extern "C" {
#endif // _WIN32
#endif // SWIG

// Note that new error status values may be added in future in order to
// indicate more fine-grained internal states, therefore, applications should
// not rely on status values being members of the enum.
typedef enum TfLiteStatus {
kTfLiteOk = 0,

@@ -54,7 +57,7 @@ typedef enum TfLiteStatus {

// Generally referring to an error in applying a delegate due to
// incompatibility between runtime and delegate, e.g., this error is returned
// when trying to apply a TfLite delegate onto a model graph that's already
// when trying to apply a TF Lite delegate onto a model graph that's already
// immutable.
kTfLiteApplicationError = 3,

@@ -68,7 +71,12 @@ typedef enum TfLiteStatus {

// Generally referring to data-reading issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataReadError = 5,
kTfLiteDelegateDataReadError = 6,

// Generally referring to issues when the TF Lite model has ops that cannot be
// resolved at runtime. This could happen when the specific op is not
// registered or built with the TF Lite framework.
kTfLiteUnresolvedOps = 7,
} TfLiteStatus;

// Types supported by tensor
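Because the hunk above both renumbers kTfLiteDelegateDataReadError and introduces kTfLiteUnresolvedOps, callers should treat TfLiteStatus as an open-ended set and only compare against the named constants. A hedged sketch of the usual check (the helper name is illustrative, not from this diff):

    #include <stdio.h>
    #include "tensorflow/lite/c/common.h"

    // Sketch: log any non-OK status from Invoke()/AllocateTensors() without
    // assuming a fixed set of integer values.
    static void LogTfLiteStatus(TfLiteStatus status)
    {
        if (status != kTfLiteOk) {
            printf("TFLite call failed, TfLiteStatus = %d\n", (int) status);
        }
    }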
@@ -21,9 +21,15 @@ limitations under the License.
#include <string.h>
#endif // TF_LITE_STATIC_MEMORY

int TfLiteIntArrayGetSizeInBytes(int size) {
size_t TfLiteIntArrayGetSizeInBytes(int size) {
static TfLiteIntArray dummy;
return sizeof(dummy) + sizeof(dummy.data[0]) * size;

size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
computed_size -= sizeof(dummy.data[0]);
#endif
return computed_size;
}

int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
@@ -45,7 +51,7 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
#ifndef TF_LITE_STATIC_MEMORY

TfLiteIntArray* TfLiteIntArrayCreate(int size) {
int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size);
if (alloc_size <= 0) return NULL;
TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
if (!ret) return ret;
@@ -68,7 +74,13 @@ void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }

int TfLiteFloatArrayGetSizeInBytes(int size) {
static TfLiteFloatArray dummy;
return sizeof(dummy) + sizeof(dummy.data[0]) * size;

int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
computed_size -= sizeof(dummy.data[0]);
#endif
return computed_size;
}

#ifndef TF_LITE_STATIC_MEMORY
@@ -176,6 +188,26 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
tensor->quantization.params = NULL;
}

TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) {
if (!src || !dst)
return kTfLiteOk;
if (src->bytes != dst->bytes)
return kTfLiteError;
if (src == dst)
return kTfLiteOk;

dst->type = src->type;
if (dst->dims)
TfLiteIntArrayFree(dst->dims);
dst->dims = TfLiteIntArrayCopy(src->dims);
memcpy(dst->data.raw, src->data.raw, src->bytes);
dst->buffer_handle = src->buffer_handle;
dst->data_is_stale = src->data_is_stale;
dst->delegate = src->delegate;

return kTfLiteOk;
}

void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
if (tensor->allocation_type != kTfLiteDynamic &&
tensor->allocation_type != kTfLitePersistentRo) {
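As a quick illustration of the helper whose return type changes above: TfLiteIntArrayCreate() allocates exactly TfLiteIntArrayGetSizeInBytes(n) bytes, i.e. the struct header plus n * sizeof(int) for the flexible data[] member. A hedged sketch (the 1x28x28 shape is arbitrary, not from this diff):

    #include "tensorflow/lite/c/common.h"

    static TfLiteIntArray* MakeDims(void)
    {
        TfLiteIntArray* dims = TfLiteIntArrayCreate(3);
        if (dims == NULL) return NULL;
        dims->data[0] = 1;
        dims->data[1] = 28;
        dims->data[2] = 28;
        return dims;  // release with TfLiteIntArrayFree()
    }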
@@ -80,12 +80,16 @@ typedef struct TfLiteExternalContext {
// indices
typedef struct TfLiteIntArray {
int size;
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
__GNUC_MINOR__ >= 1) || \
defined(HEXAGON) || \

#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
int data[1];
#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
__GNUC_MINOR__ >= 1) || \
defined(HEXAGON) || \
(defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
int data[0];
#else
int data[];
@@ -94,7 +98,7 @@ typedef struct TfLiteIntArray {

// Given the size (number of elements) in a TfLiteIntArray, calculate its size
// in bytes.
int TfLiteIntArrayGetSizeInBytes(int size);
size_t TfLiteIntArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create a array of a given `size` (uninitialized entries).
@@ -121,11 +125,15 @@ void TfLiteIntArrayFree(TfLiteIntArray* a);
// Fixed size list of floats. Used for per-channel quantization.
typedef struct TfLiteFloatArray {
int size;
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
// This also applies to the toolchain used for Qualcomm Hexagon DSPs.
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
__GNUC_MINOR__ >= 1
#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
float data[1];
#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
__GNUC_MINOR__ >= 1) || \
defined(HEXAGON) || \
(defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
float data[0];
#else
float data[];
@@ -562,6 +570,10 @@ typedef struct TfLiteNode {
// Outputs to this node expressed as indices into the simulator's tensors.
TfLiteIntArray* outputs;

// intermediate tensors to this node expressed as indices into the simulator's
// tensors.
TfLiteIntArray* intermediates;

// Opaque data provided by the node implementer through `Registration.init`.
void* user_data;

@@ -614,6 +626,16 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
const void* allocation, bool is_variable,
TfLiteTensor* tensor);

// Copies the contents of 'src' in 'dst'.
// Function does nothing if either 'src' or 'dst' is passed as nullptr and
// return kTfLiteOk.
// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size.
// Note function copies contents, so it won't create new data pointer
// or change allocation type.
// All Tensor related properties will be copied from 'src' to 'dst' like
// quantization, sparsity, ...
TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);

// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
// types other than kTfLiteDynamic will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
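The new TfLiteTensorCopy() declared above only copies the payload and metadata; it never reallocates, so both tensors must already have the same byte size. A hedged usage sketch (the wrapper name is illustrative and it deliberately turns the nullptr case into an error):

    #include "tensorflow/lite/c/common.h"

    static TfLiteStatus CopyIfSameSize(const TfLiteTensor* src, TfLiteTensor* dst)
    {
        if (src == NULL || dst == NULL || src->bytes != dst->bytes) {
            return kTfLiteError;  // TfLiteTensorCopy itself would return kTfLiteOk for nullptr
        }
        return TfLiteTensorCopy(src, dst);
    }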
@@ -819,6 +841,9 @@ typedef struct TfLiteContext {

typedef struct TfLiteRegistration {
// Initializes the op from serialized data.
// Called only *once* for the lifetime of the op, so any one-time allocations
// should be made here (unless they depend on tensor sizes).
//
// If a built-in op:
// `buffer` is the op's params data (TfLiteLSTMParams*).
// `length` is zero.
@@ -841,6 +866,7 @@ typedef struct TfLiteRegistration {
// prepare is called when the inputs this node depends on have been resized.
// context->ResizeTensor() can be called to request output tensors to be
// resized.
// Can be called multiple times for the lifetime of the op.
//
// Returns kTfLiteOk on success.
TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
@@ -1,4 +1,4 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -131,6 +131,17 @@ TfLitePadding ConvertPadding(Padding padding) {
return kTfLitePaddingUnknown;
}

// Converts the flatbuffer mirror padding enum to what is used at runtime.
TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
switch (padding) {
case MirrorPadMode_REFLECT:
return kTfLiteMirrorPaddingReflect;
case MirrorPadMode_SYMMETRIC:
return kTfLiteMirrorPaddingSymmetric;
}
return kTfLiteMirrorPaddingUnknown;
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
@@ -181,6 +192,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseArgMin(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_ASSIGN_VARIABLE: {
return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_AVERAGE_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
@@ -193,6 +208,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_CALL_ONCE: {
return ParseCallOnce(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
@@ -325,6 +344,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParsePool(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_MIRROR_PAD: {
return ParseMirrorPad(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_MEAN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
@@ -369,6 +392,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseQuantize(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_READ_VARIABLE: {
return ParseReadVariable(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_REDUCE_ANY: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
@@ -486,6 +513,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseUnpack(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_VAR_HANDLE: {
return ParseVarHandle(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_ZEROS_LIKE: {
return ParseZerosLike(op, error_reporter, allocator, builtin_data);
}
@@ -606,21 +637,8 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* seq_lstm_params =
op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(seq_lstm_params->fused_activation_function());
params->cell_clip = seq_lstm_params->cell_clip();
params->proj_clip = seq_lstm_params->proj_clip();
params->time_major = seq_lstm_params->time_major();
params->asymmetric_quantize_inputs =
seq_lstm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
@@ -693,19 +711,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_MIRROR_PAD: {
auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
if (mirror_pad_params != nullptr) {
params->mode =
mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
: TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIQUE: {
auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -750,16 +755,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CALL_ONCE: {
auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* call_once_params =
op->builtin_options_as_CallOnceOptions()) {
params->init_subgraph_index = call_once_params->init_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CONV_3D:
case BuiltinOperator_CONV_3D_TRANSPOSE: {
auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
@@ -793,17 +788,69 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_VAR_HANDLE: {
auto params = safe_allocator.Allocate<TfLiteVarHandleParams>();
case BuiltinOperator_MULTINOMIAL: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
params->container = nullptr;
params->shared_name = nullptr;
if (const auto* var_handle_params =
op->builtin_options_as_VarHandleOptions()) {
if (var_handle_params->container())
params->container = var_handle_params->container()->c_str();
if (var_handle_params->shared_name())
params->shared_name = var_handle_params->shared_name()->c_str();
if (const auto* multinomial_params =
op->builtin_options_as_RandomOptions()) {
params->seed = multinomial_params->seed();
params->seed2 = multinomial_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RANDOM_STANDARD_NORMAL: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* random_std_normal_params =
op->builtin_options_as_RandomOptions()) {
params->seed = random_std_normal_params->seed();
params->seed2 = random_std_normal_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BUCKETIZE: {
auto params = safe_allocator.Allocate<TfLiteBucketizeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bucketize_params =
op->builtin_options_as_BucketizeOptions()) {
const flatbuffers::Vector<float>* boundaries =
bucketize_params->boundaries();
if (boundaries == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"boundaries array not provided for operation 'bucketize'.\n");
return kTfLiteError;
}
params->num_boundaries = boundaries->size();
if (boundaries->data() == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter,
"boundaries.data() returned nullptr for "
"operation 'bucketize'.\n");
return kTfLiteError;
}
params->boundaries = boundaries->data();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RANDOM_UNIFORM: {
auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* random_uniform_params =
op->builtin_options_as_RandomOptions()) {
params->seed = random_uniform_params->seed();
params->seed2 = random_uniform_params->seed2();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GELU: {
auto params = safe_allocator.Allocate<TfLiteGeluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) {
params->approximate = gelu_params->approximate();
}
*builtin_data = params.release();
return kTfLiteOk;
@@ -844,8 +891,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
case BuiltinOperator_READ_VARIABLE:
case BuiltinOperator_ASSIGN_VARIABLE:
case BuiltinOperator_BROADCAST_ARGS:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
@@ -1003,6 +1048,14 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1032,6 +1085,33 @@ TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
return kTfLiteOk;
}

TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteCallOnceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);

const CallOnceOptions* schema_params =
op->builtin_options_as_CallOnceOptions();

if (schema_params != nullptr) {
params->init_subgraph_index = schema_params->init_subgraph_index();

} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better undertand the ramifications of changing the legacy behavior.
}

*builtin_data = params.release();
return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1541,6 +1621,32 @@ TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
return kTfLiteOk;
}

TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteMirrorPaddingParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);

const MirrorPadOptions* schema_params =
op->builtin_options_as_MirrorPadOptions();

if (schema_params != nullptr) {
params->mode = ConvertMirrorPadding(schema_params->mode());
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better undertand the ramifications of changing the legacy behavior.
}

*builtin_data = params.release();
return kTfLiteOk;
}

TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
@@ -1676,6 +1782,14 @@ TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}

TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
@@ -1856,6 +1970,14 @@ TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}

TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
@@ -1962,6 +2084,29 @@ TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}

TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params =
safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* seq_lstm_params =
op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(seq_lstm_params->fused_activation_function());
params->cell_clip = seq_lstm_params->cell_clip();
params->proj_clip = seq_lstm_params->proj_clip();
params->time_major = seq_lstm_params->time_major();
params->asymmetric_quantize_inputs =
seq_lstm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}

TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
@@ -2161,6 +2306,37 @@ TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}

TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteVarHandleParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteVarHandleParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);

const VarHandleOptions* schema_params =
op->builtin_options_as_VarHandleOptions();

if (schema_params != nullptr) {
if (schema_params->container()) {
params->container = schema_params->container()->c_str();
}
if (schema_params->shared_name()) {
params->shared_name = schema_params->shared_name()->c_str();
}
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better undertand the ramifications of changing the legacy behavior.
}

*builtin_data = params.release();
return kTfLiteOk;
}

// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1,4 +1,4 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -84,6 +84,11 @@ TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseAssignVariable(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
@@ -93,6 +98,10 @@ TfLiteStatus ParseBatchToSpaceNd(const Operator* op,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

@@ -229,6 +238,10 @@ TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

@@ -261,6 +274,11 @@ TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseReadVariable(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

@@ -295,6 +313,9 @@ TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseSlice(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

@@ -349,6 +370,15 @@ TfLiteStatus ParseTransposeConv(const Operator* op,
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
@@ -15,6 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_

#include <functional>
#include <memory>
#include <vector>

@@ -36,15 +37,26 @@ class OpResolver {
virtual const TfLiteRegistration* FindOp(const char* op,
int version) const = 0;

using TfLiteDelegatePtrVector =
std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
// Returns optional delegates for resolving and handling ops in the flatbuffer
// model. This may be used in addition to the standard TfLiteRegistration
// lookup for graph resolution.
using TfLiteDelegatePtrVector =
std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
// WARNING: This API is deprecated, GetDelegateCreators is preferred.
virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
return TfLiteDelegatePtrVector();
return {};
}

// Represent a function that creates a TfLite delegate instance.
using TfLiteDelegateCreator =
std::function<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
int /*num_threads*/)>;
using TfLiteDelegateCreators = std::vector<TfLiteDelegateCreator>;
// Returns a vector of delegate creators to create optional delegates for
// resolving and handling ops in the flatbuffer model. This may be used in
// addition to the standard TfLiteRegistration lookup for graph resolution.
virtual TfLiteDelegateCreators GetDelegateCreators() const { return {}; }

virtual ~OpResolver() {}

private:
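The replacement API hands back creator callbacks instead of ready-made delegates. A hedged sketch of supplying one such creator (the no-op delegate below is purely illustrative; a real creator would build e.g. an XNNPACK or GPU delegate for the requested thread count):

    #include <memory>
    #include "tensorflow/lite/core/api/op_resolver.h"

    tflite::OpResolver::TfLiteDelegateCreators ExampleDelegateCreators()
    {
        tflite::OpResolver::TfLiteDelegateCreators creators;
        creators.push_back([](int num_threads) {
            (void) num_threads;
            return std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
                nullptr, [](TfLiteDelegate*) {});
        });
        return creators;
    }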
@@ -0,0 +1,102 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_

#ifdef __cplusplus
#include <cstdint>

extern "C" {
#endif

static inline int CountLeadingZeros32Slow(uint64_t n) {
int zeroes = 28;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}

static inline int CountLeadingZeros32(uint32_t n) {
#if defined(_MSC_VER)
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse(&result, n)) {
return 31 - result;
}
return 32;
#elif defined(__GNUC__)

// Handle 0 as a special case because __builtin_clz(0) is undefined.
if (n == 0) {
return 32;
}
return __builtin_clz(n);
#else
return CountLeadingZeros32Slow(n);
#endif
}

static inline int MostSignificantBit32(uint32_t n) {
return 32 - CountLeadingZeros32(n);
}

static inline int CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) zeroes -= 32, n >>= 32;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}

static inline int CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
// MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(_MSC_VER)
// MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int)
if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
return 31 - result;
}
if (_BitScanReverse(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(__GNUC__)

// Handle 0 as a special case because __builtin_clzll(0) is undefined.
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
}

static inline int MostSignificantBit64(uint64_t n) {
return 64 - CountLeadingZeros64(n);
}

#ifdef __cplusplus
} // extern "C"
#endif

#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
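A quick hand-worked check of the helpers in this new header (values verified manually, not taken from the diff): for n = 0x10 the GCC path returns __builtin_clz(0x10) = 27 leading zeros, so MostSignificantBit32 yields 5; zero is special-cased to 32 leading zeros and therefore maps to 0.

    #include <assert.h>
    #include "tensorflow/lite/experimental/microfrontend/lib/bits.h"

    static void CheckBits(void)
    {
        assert(MostSignificantBit32(0x10u) == 5);        // highest set bit is bit 4 (1-based index 5)
        assert(MostSignificantBit32(0u) == 0);           // CountLeadingZeros32(0) is defined as 32
        assert(MostSignificantBit64(1ULL << 40) == 41);
    }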
@@ -0,0 +1,52 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"

#include <string.h>


void FftCompute(struct FftState* state, const int16_t* input,
int input_scale_shift) {
const size_t input_size = state->input_size;
const size_t fft_size = state->fft_size;

int16_t* fft_input = state->input;
// First, scale the input by the given shift.
size_t i;
for (i = 0; i < input_size; ++i) {
fft_input[i] = static_cast<int16_t>(static_cast<uint16_t>(input[i])
<< input_scale_shift);
}
// Zero out whatever else remains in the top part of the input.
for (; i < fft_size; ++i) {
fft_input[i] = 0;
}

// Apply the FFT.
kissfft_fixed16::kiss_fftr(
reinterpret_cast<kissfft_fixed16::kiss_fftr_cfg>(state->scratch),
state->input,
reinterpret_cast<kissfft_fixed16::kiss_fft_cpx*>(state->output));
}

void FftInit(struct FftState* state) {
// All the initialization is done in FftPopulateState()
}

void FftReset(struct FftState* state) {
memset(state->input, 0, state->fft_size * sizeof(*state->input));
memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output));
}
@@ -0,0 +1,50 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_

#include <stdint.h>
#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

struct complex_int16_t {
int16_t real;
int16_t imag;
};

struct FftState {
int16_t* input;
struct complex_int16_t* output;
size_t fft_size;
size_t input_size;
void* scratch;
size_t scratch_size;
};

void FftCompute(struct FftState* state, const int16_t* input,
int input_scale_shift);

void FftInit(struct FftState* state);

void FftReset(struct FftState* state);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
@@ -0,0 +1,69 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"

#include <stdio.h>

int FftPopulateState(struct FftState* state, size_t input_size) {
state->input_size = input_size;
state->fft_size = 1;
while (state->fft_size < state->input_size) {
state->fft_size <<= 1;
}

state->input = reinterpret_cast<int16_t*>(
malloc(state->fft_size * sizeof(*state->input)));
if (state->input == nullptr) {
fprintf(stderr, "Failed to alloc fft input buffer\n");
return 0;
}

state->output = reinterpret_cast<complex_int16_t*>(
malloc((state->fft_size / 2 + 1) * sizeof(*state->output) * 2));
if (state->output == nullptr) {
fprintf(stderr, "Failed to alloc fft output buffer\n");
return 0;
}

// Ask kissfft how much memory it wants.
size_t scratch_size = 0;
kissfft_fixed16::kiss_fftr_cfg kfft_cfg = kissfft_fixed16::kiss_fftr_alloc(
state->fft_size, 0, nullptr, &scratch_size);
if (kfft_cfg != nullptr) {
fprintf(stderr, "Kiss memory sizing failed.\n");
return 0;
}
state->scratch = malloc(scratch_size);
if (state->scratch == nullptr) {
fprintf(stderr, "Failed to alloc fft scratch buffer\n");
return 0;
}
state->scratch_size = scratch_size;
// Let kissfft configure the scratch space we just allocated
kfft_cfg = kissfft_fixed16::kiss_fftr_alloc(state->fft_size, 0,
state->scratch, &scratch_size);
if (kfft_cfg != state->scratch) {
fprintf(stderr, "Kiss memory preallocation strategy failed.\n");
return 0;
}
return 1;
}

void FftFreeStateContents(struct FftState* state) {
free(state->input);
free(state->output);
free(state->scratch);
}
@@ -0,0 +1,34 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_

#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"

#ifdef __cplusplus
extern "C" {
#endif

// Prepares and FFT for the given input size.
int FftPopulateState(struct FftState* state, size_t input_size);

// Frees any allocated buffers.
void FftFreeStateContents(struct FftState* state);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
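Taken together, fft.h, fft.cc and fft_util.cc give the microfrontend a fixed-point real FFT with an explicit state lifecycle. A hedged usage sketch (the 480-sample frame and 3-bit scale shift are illustrative values, not taken from this commit):

    #include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
    #include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"

    static void RunOneFrame(const int16_t frame[480])
    {
        struct FftState fft;
        if (!FftPopulateState(&fft, 480)) {   // rounds the size up to a 512-point FFT
            return;                           // allocation or kissfft setup failed
        }
        FftInit(&fft);
        FftCompute(&fft, frame, 3);           // scale input up by 3 bits, then transform
        // fft.output now holds fft.fft_size / 2 + 1 complex_int16_t bins.
        FftReset(&fft);
        FftFreeStateContents(&fft);
    }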
@@ -0,0 +1,134 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"

#include <string.h>

#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"

void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
struct complex_int16_t* fft_output,
int32_t* energy) {
const int end_index = state->end_index;
int i;
energy += state->start_index;
fft_output += state->start_index;
for (i = state->start_index; i < end_index; ++i) {
const int32_t real = fft_output->real;
const int32_t imag = fft_output->imag;
fft_output++;
const uint32_t mag_squared = (real * real) + (imag * imag);
*energy++ = mag_squared;
}
}

void FilterbankAccumulateChannels(struct FilterbankState* state,
const int32_t* energy) {
uint64_t* work = state->work;
uint64_t weight_accumulator = 0;
uint64_t unweight_accumulator = 0;

const int16_t* channel_frequency_starts = state->channel_frequency_starts;
const int16_t* channel_weight_starts = state->channel_weight_starts;
const int16_t* channel_widths = state->channel_widths;

int num_channels_plus_1 = state->num_channels + 1;
int i;
for (i = 0; i < num_channels_plus_1; ++i) {
const int32_t* magnitudes = energy + *channel_frequency_starts++;
const int16_t* weights = state->weights + *channel_weight_starts;
const int16_t* unweights = state->unweights + *channel_weight_starts++;
const int width = *channel_widths++;
int j;
for (j = 0; j < width; ++j) {
weight_accumulator += *weights++ * ((uint64_t)*magnitudes);
unweight_accumulator += *unweights++ * ((uint64_t)*magnitudes);
++magnitudes;
}
*work++ = weight_accumulator;
weight_accumulator = unweight_accumulator;
unweight_accumulator = 0;
}
}

static uint16_t Sqrt32(uint32_t num) {
if (num == 0) {
return 0;
}
uint32_t res = 0;
int max_bit_number = 32 - MostSignificantBit32(num);
max_bit_number |= 1;
uint32_t bit = 1U << (31 - max_bit_number);
int iterations = (31 - max_bit_number) / 2 + 1;
while (iterations--) {
if (num >= res + bit) {
num -= res + bit;
res = (res >> 1U) + bit;
} else {
res >>= 1U;
}
bit >>= 2U;
}
// Do rounding - if we have the bits.
if (num > res && res != 0xFFFF) {
++res;
}
return res;
}

static uint32_t Sqrt64(uint64_t num) {
// Take a shortcut and just use 32 bit operations if the upper word is all
// clear. This will cause a slight off by one issue for numbers close to 2^32,
// but it probably isn't going to matter (and gives us a big performance win).
if ((num >> 32) == 0) {
return Sqrt32((uint32_t)num);
}
uint64_t res = 0;
int max_bit_number = 64 - MostSignificantBit64(num);
max_bit_number |= 1;
uint64_t bit = 1ULL << (63 - max_bit_number);
int iterations = (63 - max_bit_number) / 2 + 1;
while (iterations--) {
if (num >= res + bit) {
num -= res + bit;
res = (res >> 1U) + bit;
} else {
res >>= 1U;
}
bit >>= 2U;
}
// Do rounding - if we have the bits.
if (num > res && res != 0xFFFFFFFFLL) {
++res;
}
return res;
}

uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift) {
const int num_channels = state->num_channels;
const uint64_t* work = state->work + 1;
// Reuse the work buffer since we're fine clobbering it at this point to hold
// the output.
uint32_t* output = (uint32_t*)state->work;
int i;
for (i = 0; i < num_channels; ++i) {
*output++ = Sqrt64(*work++) >> scale_down_shift;
}
return (uint32_t*)state->work;
}

void FilterbankReset(struct FilterbankState* state) {
memset(state->work, 0, (state->num_channels + 1) * sizeof(*state->work));
}
@@ -0,0 +1,63 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_

#include <stdint.h>
#include <stdlib.h>

#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"

#define kFilterbankBits 12

#ifdef __cplusplus
extern "C" {
#endif

struct FilterbankState {
int num_channels;
int start_index;
int end_index;
int16_t* channel_frequency_starts;
int16_t* channel_weight_starts;
int16_t* channel_widths;
int16_t* weights;
int16_t* unweights;
uint64_t* work;
};

// Converts the relevant complex values of an FFT output into energy (the
// square magnitude).
void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
struct complex_int16_t* fft_output,
int32_t* energy);

// Computes the mel-scale filterbank on the given energy array. Output is cached
// internally - to fetch it, you need to call FilterbankSqrt.
void FilterbankAccumulateChannels(struct FilterbankState* state,
const int32_t* energy);

// Applies an integer square root to the 64 bit intermediate values of the
// filterbank, and returns a pointer to them. Memory will be invalidated the
// next time FilterbankAccumulateChannels is called.
uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift);

void FilterbankReset(struct FilterbankState* state);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
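The comments in filterbank.h imply a fixed call order: convert FFT bins to energies, accumulate the weighted mel channels, then fetch the result via the integer square root. A hedged sketch of that pipeline (state setup via FilterbankPopulateState and the scale-down shift of 7 are assumptions, not taken from this commit):

    #include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"

    static void MelEnergies(struct FilterbankState* fb,
                            struct complex_int16_t* fft_output,
                            int32_t* energy_scratch)
    {
        // 1. Square magnitudes of the relevant FFT bins.
        FilterbankConvertFftComplexToEnergy(fb, fft_output, energy_scratch);
        // 2. Accumulate the weighted energies per mel channel (kept in fb->work).
        FilterbankAccumulateChannels(fb, energy_scratch);
        // 3. Integer square root and scale-down; returns fb->num_channels values.
        uint32_t* mel = FilterbankSqrt(fb, 7);
        (void) mel;
    }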
@@ -0,0 +1,220 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"

#include <assert.h>
#include <math.h>
#include <stdio.h>

#define kFilterbankIndexAlignment 4
#define kFilterbankChannelBlockSize 4

void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config) {
config->num_channels = 32;
config->lower_band_limit = 125.0f;
config->upper_band_limit = 7500.0f;
config->output_scale_shift = 7;
}

static float FreqToMel(float freq) { return 1127.0 * log1p(freq / 700.0); }

static void CalculateCenterFrequencies(const int num_channels,
const float lower_frequency_limit,
const float upper_frequency_limit,
float* center_frequencies) {
assert(lower_frequency_limit >= 0.0f);
assert(upper_frequency_limit > lower_frequency_limit);

const float mel_low = FreqToMel(lower_frequency_limit);
const float mel_hi = FreqToMel(upper_frequency_limit);
const float mel_span = mel_hi - mel_low;
const float mel_spacing = mel_span / ((float)num_channels);
int i;
for (i = 0; i < num_channels; ++i) {
center_frequencies[i] = mel_low + (mel_spacing * (i + 1));
}
}

static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight,
int16_t* unweight) {
*weight = floor(float_weight * (1 << kFilterbankBits) + 0.5);
*unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5);
}

int FilterbankPopulateState(const struct FilterbankConfig* config,
struct FilterbankState* state, int sample_rate,
int spectrum_size) {
state->num_channels = config->num_channels;
const int num_channels_plus_1 = config->num_channels + 1;

// How should we align things to index counts given the byte alignment?
const int index_alignment =
(kFilterbankIndexAlignment < sizeof(int16_t)
? 1
: kFilterbankIndexAlignment / sizeof(int16_t));

state->channel_frequency_starts =
malloc(num_channels_plus_1 * sizeof(*state->channel_frequency_starts));
state->channel_weight_starts =
malloc(num_channels_plus_1 * sizeof(*state->channel_weight_starts));
state->channel_widths =
malloc(num_channels_plus_1 * sizeof(*state->channel_widths));
state->work = malloc(num_channels_plus_1 * sizeof(*state->work));

float* center_mel_freqs =
malloc(num_channels_plus_1 * sizeof(*center_mel_freqs));
int16_t* actual_channel_starts =
malloc(num_channels_plus_1 * sizeof(*actual_channel_starts));
int16_t* actual_channel_widths =
malloc(num_channels_plus_1 * sizeof(*actual_channel_widths));

if (state->channel_frequency_starts == NULL ||
state->channel_weight_starts == NULL || state->channel_widths == NULL ||
center_mel_freqs == NULL || actual_channel_starts == NULL ||
actual_channel_widths == NULL) {
free(center_mel_freqs);
free(actual_channel_starts);
free(actual_channel_widths);
fprintf(stderr, "Failed to allocate channel buffers\n");
return 0;
}

CalculateCenterFrequencies(num_channels_plus_1, config->lower_band_limit,
config->upper_band_limit, center_mel_freqs);

// Always exclude DC.
const float hz_per_sbin = 0.5 * sample_rate / ((float)spectrum_size - 1);
state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin;
state->end_index = 0; // Initialized to zero here, but actually set below.

// For each channel, we need to figure out what frequencies belong to it, and
// how much padding we need to add so that we can efficiently multiply the
// weights and unweights for accumulation. To simplify the multiplication
// logic, all channels will have some multiplication to do (even if there are
// no frequencies that accumulate to that channel) - they will be directed to
// a set of zero weights.
int chan_freq_index_start = state->start_index;
int weight_index_start = 0;
int needs_zeros = 0;

int chan;
for (chan = 0; chan < num_channels_plus_1; ++chan) {
// Keep jumping frequencies until we overshoot the bound on this channel.
int freq_index = chan_freq_index_start;
while (FreqToMel((freq_index)*hz_per_sbin) <= center_mel_freqs[chan]) {
++freq_index;
}

const int width = freq_index - chan_freq_index_start;
actual_channel_starts[chan] = chan_freq_index_start;
actual_channel_widths[chan] = width;

if (width == 0) {
// This channel doesn't actually get anything from the frequencies, it's
// always zero. We need then to insert some 'zero' weights into the
// output, and just redirect this channel to do a single multiplication at
// this point. For simplicity, the zeros are placed at the beginning of
// the weights arrays, so we have to go and update all the other
// weight_starts to reflect this shift (but only once).
state->channel_frequency_starts[chan] = 0;
state->channel_weight_starts[chan] = 0;
state->channel_widths[chan] = kFilterbankChannelBlockSize;
if (!needs_zeros) {
needs_zeros = 1;
int j;
for (j = 0; j < chan; ++j) {
state->channel_weight_starts[j] += kFilterbankChannelBlockSize;
}
weight_index_start += kFilterbankChannelBlockSize;
}
} else {
// How far back do we need to go to ensure that we have the proper
// alignment?
const int aligned_start =
(chan_freq_index_start / index_alignment) * index_alignment;
const int aligned_width = (chan_freq_index_start - aligned_start + width);
const int padded_width =
(((aligned_width - 1) / kFilterbankChannelBlockSize) + 1) *
kFilterbankChannelBlockSize;

state->channel_frequency_starts[chan] = aligned_start;
state->channel_weight_starts[chan] = weight_index_start;
state->channel_widths[chan] = padded_width;
weight_index_start += padded_width;
}
chan_freq_index_start = freq_index;
}

// Allocate the two arrays to store the weights - weight_index_start contains
// the index of what would be the next set of weights that we would need to
|
||||
// add, so that's how many weights we need to allocate.
|
||||
state->weights = calloc(weight_index_start, sizeof(*state->weights));
|
||||
state->unweights = calloc(weight_index_start, sizeof(*state->unweights));
|
||||
|
||||
// If the alloc failed, we also need to nuke the arrays.
|
||||
if (state->weights == NULL || state->unweights == NULL) {
|
||||
free(center_mel_freqs);
|
||||
free(actual_channel_starts);
|
||||
free(actual_channel_widths);
|
||||
fprintf(stderr, "Failed to allocate weights or unweights\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Next pass, compute all the weights. Since everything has been memset to
|
||||
// zero, we only need to fill in the weights that correspond to some frequency
|
||||
// for a channel.
|
||||
const float mel_low = FreqToMel(config->lower_band_limit);
|
||||
for (chan = 0; chan < num_channels_plus_1; ++chan) {
|
||||
int frequency = actual_channel_starts[chan];
|
||||
const int num_frequencies = actual_channel_widths[chan];
|
||||
const int frequency_offset =
|
||||
frequency - state->channel_frequency_starts[chan];
|
||||
const int weight_start = state->channel_weight_starts[chan];
|
||||
const float denom_val = (chan == 0) ? mel_low : center_mel_freqs[chan - 1];
|
||||
|
||||
int j;
|
||||
for (j = 0; j < num_frequencies; ++j, ++frequency) {
|
||||
const float weight =
|
||||
(center_mel_freqs[chan] - FreqToMel(frequency * hz_per_sbin)) /
|
||||
(center_mel_freqs[chan] - denom_val);
|
||||
|
||||
// Make the float into an integer for the weights (and unweights).
|
||||
const int weight_index = weight_start + frequency_offset + j;
|
||||
QuantizeFilterbankWeights(weight, state->weights + weight_index,
|
||||
state->unweights + weight_index);
|
||||
}
|
||||
if (frequency > state->end_index) {
|
||||
state->end_index = frequency;
|
||||
}
|
||||
}
|
||||
|
||||
free(center_mel_freqs);
|
||||
free(actual_channel_starts);
|
||||
free(actual_channel_widths);
|
||||
if (state->end_index >= spectrum_size) {
|
||||
fprintf(stderr, "Filterbank end_index is above spectrum size.\n");
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
void FilterbankFreeStateContents(struct FilterbankState* state) {
|
||||
free(state->channel_frequency_starts);
|
||||
free(state->channel_weight_starts);
|
||||
free(state->channel_widths);
|
||||
free(state->weights);
|
||||
free(state->unweights);
|
||||
free(state->work);
|
||||
}
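
As a side note on the quantization above: QuantizeFilterbankWeights() rounds each filterbank weight to a fixed-point pair whose sum is (almost exactly) 1 << kFilterbankBits, so a spectrum bin's energy is split between two neighbouring channels. A minimal standalone sketch, not part of the diff; the bit width 12 is only an assumed stand-in for kFilterbankBits from filterbank.h:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FILTERBANK_BITS 12  /* assumed stand-in for kFilterbankBits */

int main(void) {
  /* float_weight plays the role of the fraction computed per bin in
   * FilterbankPopulateState(); 0.3 is an arbitrary example value. */
  const float float_weight = 0.3f;
  const int16_t weight =
      (int16_t)floor(float_weight * (1 << EXAMPLE_FILTERBANK_BITS) + 0.5);
  const int16_t unweight =
      (int16_t)floor((1.0 - float_weight) * (1 << EXAMPLE_FILTERBANK_BITS) + 0.5);
  /* weight + unweight == 1 << EXAMPLE_FILTERBANK_BITS up to rounding, so the
   * bin's energy is fully distributed between two adjacent channels. */
  printf("weight=%d unweight=%d sum=%d\n", weight, unweight, weight + unweight);
  return 0;
}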
|
||||
@@ -0,0 +1,50 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct FilterbankConfig {
|
||||
// number of frequency channel buckets for filterbank
|
||||
int num_channels;
|
||||
// maximum frequency to include
|
||||
float upper_band_limit;
|
||||
// minimum frequency to include
|
||||
float lower_band_limit;
|
||||
// unused
|
||||
int output_scale_shift;
|
||||
};
|
||||
|
||||
// Fills the frontendConfig with "sane" defaults.
|
||||
void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);
|
||||
|
||||
// Allocates any buffers.
|
||||
int FilterbankPopulateState(const struct FilterbankConfig* config,
|
||||
struct FilterbankState* state, int sample_rate,
|
||||
int spectrum_size);
|
||||
|
||||
// Frees any allocated buffers.
|
||||
void FilterbankFreeStateContents(struct FilterbankState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
|
||||
@@ -0,0 +1,72 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
|
||||
|
||||
struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
|
||||
const int16_t* samples,
|
||||
size_t num_samples,
|
||||
size_t* num_samples_read) {
|
||||
struct FrontendOutput output;
|
||||
output.values = NULL;
|
||||
output.size = 0;
|
||||
|
||||
// Try to apply the window - if it fails, return and wait for more data.
|
||||
if (!WindowProcessSamples(&state->window, samples, num_samples,
|
||||
num_samples_read)) {
|
||||
return output;
|
||||
}
|
||||
|
||||
// Apply the FFT to the window's output (and scale it so that the fixed point
|
||||
// FFT can have as much resolution as possible).
|
||||
int input_shift =
|
||||
15 - MostSignificantBit32(state->window.max_abs_output_value);
|
||||
FftCompute(&state->fft, state->window.output, input_shift);
|
||||
|
||||
// We can re-use the fft's output buffer to hold the energy.
int32_t* energy = (int32_t*)state->fft.output;
|
||||
|
||||
FilterbankConvertFftComplexToEnergy(&state->filterbank, state->fft.output,
|
||||
energy);
|
||||
|
||||
FilterbankAccumulateChannels(&state->filterbank, energy);
|
||||
uint32_t* scaled_filterbank = FilterbankSqrt(&state->filterbank, input_shift);
|
||||
|
||||
// Apply noise reduction.
|
||||
NoiseReductionApply(&state->noise_reduction, scaled_filterbank);
|
||||
|
||||
if (state->pcan_gain_control.enable_pcan) {
|
||||
PcanGainControlApply(&state->pcan_gain_control, scaled_filterbank);
|
||||
}
|
||||
|
||||
// Apply the log and scale.
|
||||
int correction_bits =
|
||||
MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
|
||||
uint16_t* logged_filterbank =
|
||||
LogScaleApply(&state->log_scale, scaled_filterbank,
|
||||
state->filterbank.num_channels, correction_bits);
|
||||
|
||||
output.size = state->filterbank.num_channels;
|
||||
output.values = logged_filterbank;
|
||||
return output;
|
||||
}
|
||||
|
||||
void FrontendReset(struct FrontendState* state) {
|
||||
WindowReset(&state->window);
|
||||
FftReset(&state->fft);
|
||||
FilterbankReset(&state->filterbank);
|
||||
NoiseReductionReset(&state->noise_reduction);
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct FrontendState {
|
||||
struct WindowState window;
|
||||
struct FftState fft;
|
||||
struct FilterbankState filterbank;
|
||||
struct NoiseReductionState noise_reduction;
|
||||
struct PcanGainControlState pcan_gain_control;
|
||||
struct LogScaleState log_scale;
|
||||
};
|
||||
|
||||
struct FrontendOutput {
|
||||
const uint16_t* values;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
// Main entry point to processing frontend samples. Updates num_samples_read to
|
||||
// contain the number of samples that have been consumed from the input array.
|
||||
// Returns a struct containing the generated output. If not enough samples were
|
||||
// added to generate a feature vector, the returned size will be 0 and the
|
||||
// values pointer will be NULL. Note that the output pointer will be invalidated
|
||||
// as soon as FrontendProcessSamples is called again, so copy the contents
|
||||
// elsewhere if you need to use them later.
|
||||
struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
|
||||
const int16_t* samples,
|
||||
size_t num_samples,
|
||||
size_t* num_samples_read);
|
||||
|
||||
void FrontendReset(struct FrontendState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
|
||||
@@ -0,0 +1,85 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
|
||||
|
||||
void FrontendFillConfigWithDefaults(struct FrontendConfig* config) {
|
||||
WindowFillConfigWithDefaults(&config->window);
|
||||
FilterbankFillConfigWithDefaults(&config->filterbank);
|
||||
NoiseReductionFillConfigWithDefaults(&config->noise_reduction);
|
||||
PcanGainControlFillConfigWithDefaults(&config->pcan_gain_control);
|
||||
LogScaleFillConfigWithDefaults(&config->log_scale);
|
||||
}
|
||||
|
||||
int FrontendPopulateState(const struct FrontendConfig* config,
|
||||
struct FrontendState* state, int sample_rate) {
|
||||
memset(state, 0, sizeof(*state));
|
||||
|
||||
if (!WindowPopulateState(&config->window, &state->window, sample_rate)) {
|
||||
fprintf(stderr, "Failed to populate window state\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!FftPopulateState(&state->fft, state->window.size)) {
|
||||
fprintf(stderr, "Failed to populate fft state\n");
|
||||
return 0;
|
||||
}
|
||||
FftInit(&state->fft);
|
||||
|
||||
if (!FilterbankPopulateState(&config->filterbank, &state->filterbank,
|
||||
sample_rate, state->fft.fft_size / 2 + 1)) {
|
||||
fprintf(stderr, "Failed to populate filterbank state\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!NoiseReductionPopulateState(&config->noise_reduction,
|
||||
&state->noise_reduction,
|
||||
state->filterbank.num_channels)) {
|
||||
fprintf(stderr, "Failed to populate noise reduction state\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int input_correction_bits =
|
||||
MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
|
||||
if (!PcanGainControlPopulateState(
|
||||
&config->pcan_gain_control, &state->pcan_gain_control,
|
||||
state->noise_reduction.estimate, state->filterbank.num_channels,
|
||||
state->noise_reduction.smoothing_bits, input_correction_bits)) {
|
||||
fprintf(stderr, "Failed to populate pcan gain control state\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!LogScalePopulateState(&config->log_scale, &state->log_scale)) {
|
||||
fprintf(stderr, "Failed to populate log scale state\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
FrontendReset(state);
|
||||
|
||||
// All good, return a true value.
|
||||
return 1;
|
||||
}
|
||||
|
||||
void FrontendFreeStateContents(struct FrontendState* state) {
|
||||
WindowFreeStateContents(&state->window);
|
||||
FftFreeStateContents(&state->fft);
|
||||
FilterbankFreeStateContents(&state->filterbank);
|
||||
NoiseReductionFreeStateContents(&state->noise_reduction);
|
||||
PcanGainControlFreeStateContents(&state->pcan_gain_control);
|
||||
}
|
||||
@@ -0,0 +1,52 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct FrontendConfig {
|
||||
struct WindowConfig window;
|
||||
struct FilterbankConfig filterbank;
|
||||
struct NoiseReductionConfig noise_reduction;
|
||||
struct PcanGainControlConfig pcan_gain_control;
|
||||
struct LogScaleConfig log_scale;
|
||||
};
|
||||
|
||||
// Fills the frontendConfig with "sane" defaults.
|
||||
void FrontendFillConfigWithDefaults(struct FrontendConfig* config);
|
||||
|
||||
// Allocates any buffers.
|
||||
int FrontendPopulateState(const struct FrontendConfig* config,
|
||||
struct FrontendState* state, int sample_rate);
|
||||
|
||||
// Frees any allocated buffers.
|
||||
void FrontendFreeStateContents(struct FrontendState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
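
For orientation, a minimal usage sketch of the API declared in frontend.h and frontend_util.h above (added for illustration, not part of the diff; the 16 kHz rate, buffer names, and printing are assumptions):

#include <stdint.h>
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"

/* Feeds an int16 audio buffer through the feature frontend and reports how
 * many filterbank channels each produced feature frame contains. */
void RunFrontendOnBuffer(const int16_t* audio, size_t audio_len, int sample_rate) {
  struct FrontendConfig config;
  struct FrontendState state;
  FrontendFillConfigWithDefaults(&config);
  if (!FrontendPopulateState(&config, &state, sample_rate)) {
    return;  /* allocation or configuration failed */
  }
  while (audio_len > 0) {
    size_t num_read = 0;
    struct FrontendOutput output =
        FrontendProcessSamples(&state, audio, audio_len, &num_read);
    audio += num_read;
    audio_len -= num_read;
    if (output.values != NULL) {
      /* One feature frame is ready; copy it out before the next call, since
       * the pointer is invalidated by the next FrontendProcessSamples(). */
      printf("frame with %zu channels\n", output.size);
    }
  }
  FrontendFreeStateContents(&state);
}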
|
||||
@@ -0,0 +1,48 @@
|
||||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_COMMON_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_COMMON_H_
|
||||
|
||||
// This header file should be included in all variants of kiss_fft_$type.{h,cc}
// so that their sub-included source files do not mistakenly wrap libc header
// files within their kissfft_$type namespaces.
// E.g., this header avoids kissfft_int16.h containing:
//   namespace kiss_fft_int16 {
//     #include "kiss_fft.h"
//   }
// where kiss_fft.h contains:
//   #include <math.h>
//
// TRICK: By including the following header files here, their preprocessor
// header guards prevent them from being re-defined inside of the kiss_fft_$type
// namespaces declared within the kiss_fft_$type.{h,cc} sources.
// Note that the original kiss_fft*.h files are untouched since they
// may be used in libraries that include them directly.
|
||||
#include <limits.h>
|
||||
#include <math.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#ifdef FIXED_POINT
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
|
||||
#ifdef USE_SIMD
|
||||
#include <xmmintrin.h>
|
||||
#endif
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_COMMON_H_
|
||||
@@ -0,0 +1,8 @@
#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h"

#define FIXED_POINT 16
namespace kissfft_fixed16 {
#include "kiss_fft.c"
#include "tools/kiss_fftr.c"
} // namespace kissfft_fixed16
#undef FIXED_POINT
@@ -0,0 +1,34 @@
|
||||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h"
|
||||
|
||||
// Wrap 16-bit kiss fft in its own namespace. Enables us to link an application
// with different kiss fft resolutions (16/32-bit integer, float, double)
// without getting a linker error.
#define FIXED_POINT 16
|
||||
namespace kissfft_fixed16 {
|
||||
#include "kiss_fft.h"
|
||||
#include "tools/kiss_fftr.h"
|
||||
} // namespace kissfft_fixed16
|
||||
#undef FIXED_POINT
|
||||
#undef kiss_fft_scalar
|
||||
#undef KISS_FFT_H
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
|
||||
const uint16_t kLogLut[]
|
||||
#ifndef _MSC_VER
|
||||
__attribute__((aligned(4)))
|
||||
#endif  // _MSC_VER
= {0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163,
|
||||
2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848,
|
||||
3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934,
|
||||
5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
|
||||
5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633,
|
||||
5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370,
|
||||
5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762,
|
||||
4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
|
||||
3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659,
|
||||
2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224,
|
||||
1094, 963, 830, 695, 559, 421, 282, 142, 0, 0};
|
||||
@@ -0,0 +1,40 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Number of segments in the log lookup table. The table will be kLogSegments+1
|
||||
// in length (with some padding).
|
||||
#define kLogSegments 128
|
||||
#define kLogSegmentsLog2 7
|
||||
|
||||
// Scale used by lookup table.
|
||||
#define kLogScale 65536
|
||||
#define kLogScaleLog2 16
|
||||
#define kLogCoeff 45426
|
||||
|
||||
extern const uint16_t kLogLut[];
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
|
||||
@@ -0,0 +1,83 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
|
||||
|
||||
#define kuint16max 0x0000FFFF
|
||||
|
||||
// The following functions implement integer logarithms of various sizes. The
|
||||
// approximation is calculated according to the method described in
// www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/
|
||||
// publicaciones/SPL2007/Log10-spl07.pdf
|
||||
// It first calculates log2 of the input and then converts it to natural
|
||||
// logarithm.
|
||||
|
||||
static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) {
|
||||
// Part 1
|
||||
int32_t frac = x - (1LL << log2x);
|
||||
if (log2x < kLogScaleLog2) {
|
||||
frac <<= kLogScaleLog2 - log2x;
|
||||
} else {
|
||||
frac >>= log2x - kLogScaleLog2;
|
||||
}
|
||||
// Part 2
|
||||
const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2);
|
||||
const uint32_t seg_unit =
|
||||
(((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2;
|
||||
|
||||
const int32_t c0 = kLogLut[base_seg];
|
||||
const int32_t c1 = kLogLut[base_seg + 1];
|
||||
const int32_t seg_base = seg_unit * base_seg;
|
||||
const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2;
|
||||
return frac + c0 + rel_pos;
|
||||
}
|
||||
|
||||
static uint32_t Log(const uint32_t x, const uint32_t scale_shift) {
|
||||
const uint32_t integer = MostSignificantBit32(x) - 1;
|
||||
const uint32_t fraction = Log2FractionPart(x, integer);
|
||||
const uint32_t log2 = (integer << kLogScaleLog2) + fraction;
|
||||
const uint32_t round = kLogScale / 2;
|
||||
const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2;
|
||||
// Finally scale to our output scale
|
||||
const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2;
|
||||
return loge_scaled;
|
||||
}
|
||||
|
||||
uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
|
||||
int signal_size, int correction_bits) {
|
||||
const int scale_shift = state->scale_shift;
|
||||
uint16_t* output = (uint16_t*)signal;
|
||||
uint16_t* ret = output;
|
||||
int i;
|
||||
for (i = 0; i < signal_size; ++i) {
|
||||
uint32_t value = *signal++;
|
||||
if (state->enable_log) {
|
||||
if (correction_bits < 0) {
|
||||
value >>= -correction_bits;
|
||||
} else {
|
||||
value <<= correction_bits;
|
||||
}
|
||||
if (value > 1) {
|
||||
value = Log(value, scale_shift);
|
||||
} else {
|
||||
value = 0;
|
||||
}
|
||||
}
|
||||
*output++ = (value < kuint16max) ? value : kuint16max;
|
||||
}
|
||||
return ret;
|
||||
}
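
The constants used by Log() above tie together as ln(x) = ln(2) * log2(x): with kLogScaleLog2 = 16 (see log_lut.h), kLogCoeff is ln(2) in Q16 fixed point, i.e. round(ln 2 * 65536) = 45426. A tiny standalone check, added for illustration only and not part of the diff:

#include <math.h>
#include <stdio.h>

/* Recomputes the constants from log_lut.h to show where they come from:
 * kLogScaleLog2 = 16 -> kLogScale = 65536, kLogCoeff = round(ln(2) * 65536). */
int main(void) {
  const double scale = 65536.0;                 /* kLogScale */
  const long coeff = lround(log(2.0) * scale);  /* expected: 45426 (kLogCoeff) */
  printf("ln(2) * 65536 = %ld\n", coeff);
  return 0;
}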
|
||||
@@ -0,0 +1,39 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct LogScaleState {
|
||||
int enable_log;
|
||||
int scale_shift;
|
||||
};
|
||||
|
||||
// Applies a fixed point logarithm to the signal and converts it to 16 bit. Note
|
||||
// that the signal array will be modified.
|
||||
uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
|
||||
int signal_size, int correction_bits);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
|
||||
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,11 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
-#define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
-
-extern const unsigned char g_keyword_scrambled_model_data[];
-extern const unsigned int g_keyword_scrambled_model_data_length;
-
-#endif  // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
+
+void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) {
+  config->enable_log = 1;
+  config->scale_shift = 6;
+}
+
+int LogScalePopulateState(const struct LogScaleConfig* config,
+                          struct LogScaleState* state) {
+  state->enable_log = config->enable_log;
+  state->scale_shift = config->scale_shift;
+  return 1;
+}
@@ -0,0 +1,45 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct LogScaleConfig {
|
||||
// set to false (0) to disable this module
|
||||
int enable_log;
|
||||
// scale results by 2^(scale_shift)
|
||||
int scale_shift;
|
||||
};
|
||||
|
||||
// Populates the LogScaleConfig with "sane" default values.
|
||||
void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config);
|
||||
|
||||
// Allocates any buffers.
|
||||
int LogScalePopulateState(const struct LogScaleConfig* config,
|
||||
struct LogScaleState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
|
||||
@@ -0,0 +1,51 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) {
|
||||
int i;
|
||||
for (i = 0; i < state->num_channels; ++i) {
|
||||
const uint32_t smoothing =
|
||||
((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing;
|
||||
const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing;
|
||||
|
||||
// Update the estimate of the noise.
|
||||
const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits;
|
||||
uint32_t estimate =
|
||||
(((uint64_t)signal_scaled_up * smoothing) +
|
||||
((uint64_t)state->estimate[i] * one_minus_smoothing)) >>
|
||||
kNoiseReductionBits;
|
||||
state->estimate[i] = estimate;
|
||||
|
||||
// Make sure that we can't get a negative value for the signal - estimate.
|
||||
if (estimate > signal_scaled_up) {
|
||||
estimate = signal_scaled_up;
|
||||
}
|
||||
|
||||
const uint32_t floor =
|
||||
((uint64_t)signal[i] * state->min_signal_remaining) >>
|
||||
kNoiseReductionBits;
|
||||
const uint32_t subtracted =
|
||||
(signal_scaled_up - estimate) >> state->smoothing_bits;
|
||||
const uint32_t output = subtracted > floor ? subtracted : floor;
|
||||
signal[i] = output;
|
||||
}
|
||||
}
|
||||
|
||||
void NoiseReductionReset(struct NoiseReductionState* state) {
|
||||
memset(state->estimate, 0, sizeof(*state->estimate) * state->num_channels);
|
||||
}
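
In floating point, the per-channel update in NoiseReductionApply() above is a one-pole low-pass filter on the noise estimate, followed by spectral subtraction with a floor. A rough equivalent sketch (illustration only, not part of the diff; the signal values are invented, while 0.025 and 0.05 mirror the defaults in noise_reduction_util.c below):

#include <stdio.h>

/* Approximate floating-point version of the fixed-point update:
 *   estimate' = s * x + (1 - s) * estimate,  s = smoothing / 2^kNoiseReductionBits
 *   output    = max(x - estimate', min_signal_remaining * x)                    */
int main(void) {
  const double smoothing = 0.025;  /* even-channel default */
  double estimate = 0.0;
  const double signal[] = {100.0, 100.0, 100.0, 400.0, 100.0};
  for (int i = 0; i < 5; ++i) {
    estimate = smoothing * signal[i] + (1.0 - smoothing) * estimate;
    double denoised = signal[i] - estimate;
    if (denoised < 0.05 * signal[i]) {  /* min_signal_remaining default */
      denoised = 0.05 * signal[i];
    }
    printf("in=%6.1f estimate=%6.2f out=%6.2f\n", signal[i], estimate, denoised);
  }
  return 0;
}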
|
||||
@@ -0,0 +1,46 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
|
||||
|
||||
#define kNoiseReductionBits 14
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct NoiseReductionState {
|
||||
int smoothing_bits;
|
||||
uint16_t even_smoothing;
|
||||
uint16_t odd_smoothing;
|
||||
uint16_t min_signal_remaining;
|
||||
int num_channels;
|
||||
uint32_t* estimate;
|
||||
};
|
||||
|
||||
// Removes stationary noise from each channel of the signal using a low pass
|
||||
// filter.
|
||||
void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal);
|
||||
|
||||
void NoiseReductionReset(struct NoiseReductionState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
|
||||
@@ -0,0 +1,45 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) {
|
||||
config->smoothing_bits = 10;
|
||||
config->even_smoothing = 0.025;
|
||||
config->odd_smoothing = 0.06;
|
||||
config->min_signal_remaining = 0.05;
|
||||
}
|
||||
|
||||
int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
|
||||
struct NoiseReductionState* state,
|
||||
int num_channels) {
|
||||
state->smoothing_bits = config->smoothing_bits;
|
||||
state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits);
|
||||
state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits);
|
||||
state->min_signal_remaining =
|
||||
config->min_signal_remaining * (1 << kNoiseReductionBits);
|
||||
state->num_channels = num_channels;
|
||||
state->estimate = calloc(state->num_channels, sizeof(*state->estimate));
|
||||
if (state->estimate == NULL) {
|
||||
fprintf(stderr, "Failed to alloc estimate buffer\n");
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
void NoiseReductionFreeStateContents(struct NoiseReductionState* state) {
|
||||
free(state->estimate);
|
||||
}
|
||||
@@ -0,0 +1,50 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct NoiseReductionConfig {
|
||||
// scale the signal up by 2^(smoothing_bits) before reduction
|
||||
int smoothing_bits;
|
||||
// smoothing coefficient for even-numbered channels
|
||||
float even_smoothing;
|
||||
// smoothing coefficient for odd-numbered channels
|
||||
float odd_smoothing;
|
||||
// fraction of signal to preserve (1.0 disables this module)
|
||||
float min_signal_remaining;
|
||||
};
|
||||
|
||||
// Populates the NoiseReductionConfig with "sane" default values.
|
||||
void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config);
|
||||
|
||||
// Allocates any buffers.
|
||||
int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
|
||||
struct NoiseReductionState* state,
|
||||
int num_channels);
|
||||
|
||||
// Frees any allocated buffers.
|
||||
void NoiseReductionFreeStateContents(struct NoiseReductionState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
|
||||
@@ -0,0 +1,56 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
|
||||
|
||||
int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
|
||||
if (x <= 2) {
|
||||
return lut[x];
|
||||
}
|
||||
|
||||
const int16_t interval = MostSignificantBit32(x);
|
||||
lut += 4 * interval - 6;
|
||||
|
||||
const int16_t frac =
|
||||
((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) &
|
||||
0x3FF;
|
||||
|
||||
int32_t result = ((int32_t)lut[2] * frac) >> 5;
|
||||
result += (int32_t)((uint32_t)lut[1] << 5);
|
||||
result *= frac;
|
||||
result = (result + (1 << 14)) >> 15;
|
||||
result += lut[0];
|
||||
return (int16_t)result;
|
||||
}
|
||||
|
||||
uint32_t PcanShrink(const uint32_t x) {
|
||||
if (x < (2 << kPcanSnrBits)) {
|
||||
return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits);
|
||||
} else {
|
||||
return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits);
|
||||
}
|
||||
}
|
||||
|
||||
void PcanGainControlApply(struct PcanGainControlState* state,
|
||||
uint32_t* signal) {
|
||||
int i;
|
||||
for (i = 0; i < state->num_channels; ++i) {
|
||||
const uint32_t gain =
|
||||
WideDynamicFunction(state->noise_estimate[i], state->gain_lut);
|
||||
const uint32_t snr = ((uint64_t)signal[i] * gain) >> state->snr_shift;
|
||||
signal[i] = PcanShrink(snr);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#define kPcanSnrBits 12
|
||||
#define kPcanOutputBits 6
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Details at https://research.google/pubs/pub45911.pdf
|
||||
struct PcanGainControlState {
|
||||
int enable_pcan;
|
||||
uint32_t* noise_estimate;
|
||||
int num_channels;
|
||||
int16_t* gain_lut;
|
||||
int32_t snr_shift;
|
||||
};
|
||||
|
||||
int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
|
||||
|
||||
uint32_t PcanShrink(const uint32_t x);
|
||||
|
||||
void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
|
||||
@@ -0,0 +1,92 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
|
||||
|
||||
#include <math.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#define kint16max 0x00007FFF
|
||||
|
||||
void PcanGainControlFillConfigWithDefaults(
|
||||
struct PcanGainControlConfig* config) {
|
||||
config->enable_pcan = 0;
|
||||
config->strength = 0.95;
|
||||
config->offset = 80.0;
|
||||
config->gain_bits = 21;
|
||||
}
|
||||
|
||||
int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
|
||||
int32_t input_bits, uint32_t x) {
|
||||
const float x_as_float = ((float)x) / ((uint32_t)1 << input_bits);
|
||||
const float gain_as_float =
|
||||
((uint32_t)1 << config->gain_bits) *
|
||||
powf(x_as_float + config->offset, -config->strength);
|
||||
|
||||
if (gain_as_float > kint16max) {
|
||||
return kint16max;
|
||||
}
|
||||
return (int16_t)(gain_as_float + 0.5f);
|
||||
}
|
||||
|
||||
int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
|
||||
struct PcanGainControlState* state,
|
||||
uint32_t* noise_estimate,
|
||||
const int num_channels,
|
||||
const uint16_t smoothing_bits,
|
||||
const int32_t input_correction_bits) {
|
||||
state->enable_pcan = config->enable_pcan;
|
||||
if (!state->enable_pcan) {
|
||||
return 1;
|
||||
}
|
||||
state->noise_estimate = noise_estimate;
|
||||
state->num_channels = num_channels;
|
||||
state->gain_lut = malloc(kWideDynamicFunctionLUTSize * sizeof(int16_t));
|
||||
if (state->gain_lut == NULL) {
|
||||
fprintf(stderr, "Failed to allocate gain LUT\n");
|
||||
return 0;
|
||||
}
|
||||
state->snr_shift = config->gain_bits - input_correction_bits - kPcanSnrBits;
|
||||
|
||||
const int32_t input_bits = smoothing_bits - input_correction_bits;
|
||||
state->gain_lut[0] = PcanGainLookupFunction(config, input_bits, 0);
|
||||
state->gain_lut[1] = PcanGainLookupFunction(config, input_bits, 1);
|
||||
state->gain_lut -= 6;
|
||||
int interval;
|
||||
for (interval = 2; interval <= kWideDynamicFunctionBits; ++interval) {
|
||||
const uint32_t x0 = (uint32_t)1 << (interval - 1);
|
||||
const uint32_t x1 = x0 + (x0 >> 1);
|
||||
const uint32_t x2 =
|
||||
(interval == kWideDynamicFunctionBits) ? x0 + (x0 - 1) : 2 * x0;
|
||||
|
||||
const int16_t y0 = PcanGainLookupFunction(config, input_bits, x0);
|
||||
const int16_t y1 = PcanGainLookupFunction(config, input_bits, x1);
|
||||
const int16_t y2 = PcanGainLookupFunction(config, input_bits, x2);
|
||||
|
||||
const int32_t diff1 = (int32_t)y1 - y0;
|
||||
const int32_t diff2 = (int32_t)y2 - y0;
|
||||
const int32_t a1 = 4 * diff1 - diff2;
|
||||
const int32_t a2 = diff2 - a1;
|
||||
|
||||
state->gain_lut[4 * interval] = y0;
|
||||
state->gain_lut[4 * interval + 1] = (int16_t)a1;
|
||||
state->gain_lut[4 * interval + 2] = (int16_t)a2;
|
||||
}
|
||||
state->gain_lut += 6;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void PcanGainControlFreeStateContents(struct PcanGainControlState* state) {
|
||||
free(state->gain_lut);
|
||||
}
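
A quick standalone check, added here for illustration only, that the a1/a2 coefficients built above interpolate the gain quadratically through the start, midpoint, and end of each octave, which is exactly how WideDynamicFunction() in pcan_gain_control.c evaluates them (with t = frac / 1024):

#include <stdio.h>

/* With diff1 = y1 - y0 and diff2 = y2 - y0, the table stores a1 = 4*diff1 - diff2
 * and a2 = diff2 - a1. Evaluating y(t) = y0 + a1*t + a2*t*t for t in [0, 1]
 * reproduces the three sampled LUT points of one octave. The sample values
 * below are made up for the demonstration. */
int main(void) {
  const int y0 = 100, y1 = 70, y2 = 55;
  const int diff1 = y1 - y0, diff2 = y2 - y0;
  const int a1 = 4 * diff1 - diff2;
  const int a2 = diff2 - a1;
  const double t[3] = {0.0, 0.5, 1.0};
  for (int i = 0; i < 3; ++i) {
    const double y = y0 + a1 * t[i] + a2 * t[i] * t[i];
    printf("t=%.1f y=%.1f\n", t[i], y);  /* prints 100.0, 70.0, 55.0 */
  }
  return 0;
}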
|
||||
@@ -0,0 +1,57 @@
|
||||
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
|
||||
|
||||
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
|
||||
|
||||
#define kWideDynamicFunctionBits 32
|
||||
#define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct PcanGainControlConfig {
|
||||
// set to false (0) to disable this module
|
||||
int enable_pcan;
|
||||
// gain normalization exponent (0.0 disables, 1.0 full strength)
|
||||
float strength;
|
||||
// positive value added in the normalization denominator
|
||||
float offset;
|
||||
// number of fractional bits in the gain
|
||||
int gain_bits;
|
||||
};
|
||||
|
||||
void PcanGainControlFillConfigWithDefaults(
|
||||
struct PcanGainControlConfig* config);
|
||||
|
||||
int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
|
||||
int32_t input_bits, uint32_t x);
|
||||
|
||||
int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
|
||||
struct PcanGainControlState* state,
|
||||
uint32_t* noise_estimate,
|
||||
const int num_channels,
|
||||
const uint16_t smoothing_bits,
|
||||
const int32_t input_correction_bits);
|
||||
|
||||
void PcanGainControlFreeStateContents(struct PcanGainControlState* state);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
|
||||
@@ -0,0 +1,70 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"

#include <string.h>

int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
                         size_t num_samples, size_t* num_samples_read) {
  const int size = state->size;

  // Copy samples from the samples buffer over to our local input.
  size_t max_samples_to_copy = state->size - state->input_used;
  if (max_samples_to_copy > num_samples) {
    max_samples_to_copy = num_samples;
  }
  memcpy(state->input + state->input_used, samples,
         max_samples_to_copy * sizeof(*samples));
  *num_samples_read = max_samples_to_copy;
  state->input_used += max_samples_to_copy;

  if (state->input_used < state->size) {
    // We don't have enough samples to compute a window.
    return 0;
  }

  // Apply the window to the input.
  const int16_t* coefficients = state->coefficients;
  const int16_t* input = state->input;
  int16_t* output = state->output;
  int i;
  int16_t max_abs_output_value = 0;
  for (i = 0; i < size; ++i) {
    int16_t new_value =
        (((int32_t)*input++) * *coefficients++) >> kFrontendWindowBits;
    *output++ = new_value;
    if (new_value < 0) {
      new_value = -new_value;
    }
    if (new_value > max_abs_output_value) {
      max_abs_output_value = new_value;
    }
  }
  // Shuffle the input down by the step size, and update how much we have used.
  memmove(state->input, state->input + state->step,
          sizeof(*state->input) * (state->size - state->step));
  state->input_used -= state->step;
  state->max_abs_output_value = max_abs_output_value;

  // Indicate that the output buffer is valid for the next stage.
  return 1;
}

void WindowReset(struct WindowState* state) {
  memset(state->input, 0, state->size * sizeof(*state->input));
  memset(state->output, 0, state->size * sizeof(*state->output));
  state->input_used = 0;
  state->max_abs_output_value = 0;
}
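For orientation, here is a minimal sketch of how this windowing stage can be driven from application code. It assumes a 16 kHz mono stream and the defaults from WindowFillConfigWithDefaults() (declared in window_util.h further below); the wrapper function RunWindowing and the way the buffer is fed are illustrative only, not part of this diff.

```cpp
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"

// Hypothetical driver loop: feeds a PCM buffer through the window stage.
void RunWindowing(const int16_t* audio, size_t audio_len) {
  struct WindowConfig config;
  WindowFillConfigWithDefaults(&config);  // 25 ms window, 10 ms step

  struct WindowState state;
  if (!WindowPopulateState(&config, &state, /*sample_rate=*/16000)) {
    return;  // allocation failed
  }

  while (audio_len > 0) {
    size_t read = 0;
    // Returns 1 once a full window's worth of samples has been gathered.
    if (WindowProcessSamples(&state, audio, audio_len, &read)) {
      // state.output now holds state.size windowed samples;
      // hand them to the next stage (e.g. the FFT) here.
    }
    audio += read;
    audio_len -= read;
  }

  WindowFreeStateContents(&state);
}
```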
@@ -0,0 +1,49 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_

#include <stdint.h>
#include <stdlib.h>

#define kFrontendWindowBits 12

#ifdef __cplusplus
extern "C" {
#endif

struct WindowState {
  size_t size;
  int16_t* coefficients;
  size_t step;

  int16_t* input;
  size_t input_used;
  int16_t* output;
  int16_t max_abs_output_value;
};

// Applies a window to the samples coming in, stepping forward at the given
// rate.
int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
                         size_t num_samples, size_t* num_samples_read);

void WindowReset(struct WindowState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
@@ -0,0 +1,73 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Some platforms don't have M_PI
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

void WindowFillConfigWithDefaults(struct WindowConfig* config) {
  config->size_ms = 25;
  config->step_size_ms = 10;
}

int WindowPopulateState(const struct WindowConfig* config,
                        struct WindowState* state, int sample_rate) {
  state->size = config->size_ms * sample_rate / 1000;
  state->step = config->step_size_ms * sample_rate / 1000;

  state->coefficients = malloc(state->size * sizeof(*state->coefficients));
  if (state->coefficients == NULL) {
    fprintf(stderr, "Failed to allocate window coefficients\n");
    return 0;
  }

  // Populate the window values.
  const float arg = M_PI * 2.0 / ((float)state->size);
  int i;
  for (i = 0; i < state->size; ++i) {
    float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5)));
    // Scale it to fixed point and round it.
    state->coefficients[i] =
        floor(float_value * (1 << kFrontendWindowBits) + 0.5);
  }

  state->input_used = 0;
  state->input = malloc(state->size * sizeof(*state->input));
  if (state->input == NULL) {
    fprintf(stderr, "Failed to allocate window input\n");
    return 0;
  }

  state->output = malloc(state->size * sizeof(*state->output));
  if (state->output == NULL) {
    fprintf(stderr, "Failed to allocate window output\n");
    return 0;
  }

  return 1;
}

void WindowFreeStateContents(struct WindowState* state) {
  free(state->coefficients);
  free(state->input);
  free(state->output);
}
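The coefficient loop above builds a Hann-style window, w[i] = 0.5 - 0.5*cos(2*pi*(i + 0.5)/size), and quantizes it to Q12 (kFrontendWindowBits). A small standalone check of that arithmetic follows; the 400-sample size (25 ms at 16 kHz) is only an assumed example.

```cpp
#include <cmath>
#include <cstdio>

// Recomputes one coefficient the same way WindowPopulateState does:
// w[i] = 0.5 - 0.5 * cos(2*pi*(i + 0.5) / size), scaled to Q12 and rounded.
int main() {
  const double kPi = 3.14159265358979323846;
  const int kWindowBits = 12;   // mirrors kFrontendWindowBits in window.h
  const int size = 400;         // 25 ms at 16 kHz (assumed example)
  const int i = size / 2;       // mid-window, where the Hann value is ~1.0
  const float arg = static_cast<float>(kPi * 2.0 / size);
  const float float_value = 0.5f - 0.5f * std::cos(arg * (i + 0.5f));
  const int coeff = static_cast<int>(
      std::floor(float_value * (1 << kWindowBits) + 0.5f));
  // Prints "coefficient[200] = 4096 of 4096", i.e. ~1.0 in Q12.
  std::printf("coefficient[%d] = %d of %d\n", i, coeff, 1 << kWindowBits);
  return 0;
}
```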
@@ -0,0 +1,45 @@
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_

#include "tensorflow/lite/experimental/microfrontend/lib/window.h"

#ifdef __cplusplus
extern "C" {
#endif

struct WindowConfig {
  // length of window frame in milliseconds
  size_t size_ms;
  // length of step for next frame in milliseconds
  size_t step_size_ms;
};

// Populates the WindowConfig with "sane" default values.
void WindowFillConfigWithDefaults(struct WindowConfig* config);

// Allocates any buffers.
int WindowPopulateState(const struct WindowConfig* config,
                        struct WindowState* state, int sample_rate);

// Frees any allocated buffers.
void WindowFreeStateContents(struct WindowState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
@@ -75,6 +75,7 @@ float ActivationFunction(float x) {
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
                         const float* bias_data, int array_size,
                         float* array_data) {
  if (bias_size == 0) return;
  // Note: see b/132215220: in May 2019 we thought it would be OK to replace
  // this with the Eigen one-liner:
  // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
@@ -138,6 +139,100 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
#endif
}

// Single-rounding MultiplyByQuantizedMultiplier
#if TFLITE_SINGLE_ROUNDING
inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
                                             int32_t quantized_multiplier,
                                             int shift) {
  TFLITE_DCHECK(quantized_multiplier >= 0);
  TFLITE_DCHECK(shift >= -31 && shift <= 30);

  const int64_t total_shift = 31 - shift;
  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
  int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
  result = result >> total_shift;

  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
                result <= std::numeric_limits<int32_t>::max());
  return static_cast<int32_t>(result);
}

inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
    int32_t x, int32_t quantized_multiplier, int shift) {
  TFLITE_DCHECK_LE(shift, 0);
  return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}

inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
    int32_t x, int32_t quantized_multiplier, int shift) {
  TFLITE_DCHECK_GE(shift, 0);
  return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}

inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
                                             int32_t quantized_multiplier,
                                             int shift) {
  // Inputs:
  // - quantized_multiplier has fixed point at bit 31
  // - shift is -31 to +7 (negative for right shift)
  //
  // Assumptions: The following input ranges are assumed
  // - quantize_scale>=0  (the usual range is (1<<30) to (1>>31)-1)
  // - scaling is chosen so final scaled result fits in int32_t
  // - input x is in the range -(1<<47) <= x < (1<<47)
  TFLITE_DCHECK(quantized_multiplier >= 0);
  TFLITE_DCHECK(shift >= -31 && shift < 8);
  TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
                x < (static_cast<int64_t>(1) << 47));

  const int32_t reduced_multiplier =
      (quantized_multiplier < 0x7FFF0000)
          ? ((quantized_multiplier + (1 << 15)) >> 16)
          : 0x7FFF;
  const int64_t total_shift = 15 - shift;
  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
  int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
  result = result >> total_shift;

  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
                result <= std::numeric_limits<int32_t>::max());
  return static_cast<int32_t>(result);
}

#ifdef USE_NEON
inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
    int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
  TFLITE_DCHECK(quantized_multiplier >= 0);

  const int right_shift = std::min(-1, shift);
  const int left_shift = shift - right_shift;

  const int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
  const int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
  const int32x4_t right_shift_dup = vdupq_n_s32(right_shift);

  int32x4x4_t result;
  result.val[0] = vrshlq_s32(
      vqdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), multiplier_dup),
      right_shift_dup);

  result.val[1] = vrshlq_s32(
      vqdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), multiplier_dup),
      right_shift_dup);

  result.val[2] = vrshlq_s32(
      vqdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), multiplier_dup),
      right_shift_dup);

  result.val[3] = vrshlq_s32(
      vqdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), multiplier_dup),
      right_shift_dup);

  return result;
}
#endif  // USE_NEON
// Double-rounding MultiplyByQuantizedMultiplier
#else
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
    int32_t x, int32_t quantized_multiplier, int left_shift) {
  using gemmlowp::RoundingDivideByPOT;
@@ -224,7 +319,8 @@ inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(

  return result;
}
#endif
#endif  // USE_NEON
#endif  // TFLITE_SINGLE_ROUNDING

template <typename T>
int CountLeadingZeros(T integer_input) {
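As a sanity check on the single-rounding path above: the 32-bit overload computes (x * quantized_multiplier + 2^(30 - shift)) >> (31 - shift), i.e. a rounded Q31 multiply followed by a power-of-two shift. A short sketch with illustrative values (the DCHECKs are omitted):

```cpp
#include <cstdint>
#include <cstdio>

// Plain restatement of the single-rounding formula from the hunk above,
// kept standalone for a quick numeric check.
int32_t MultiplySingleRounding(int32_t x, int32_t quantized_multiplier,
                               int shift) {
  const int64_t total_shift = 31 - shift;
  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
  return static_cast<int32_t>(
      (static_cast<int64_t>(x) * quantized_multiplier + round) >> total_shift);
}

int main() {
  // 0.5 in Q31 is 1 << 30; with shift = 0 the result is x/2, rounded to nearest.
  const int32_t half_q31 = 1 << 30;
  std::printf("%d\n", MultiplySingleRounding(1000, half_q31, 0));  // 500
  std::printf("%d\n", MultiplySingleRounding(1001, half_q31, 0));  // 501
  // shift = 1 doubles the scale, so 0.5 * 2^1 behaves like multiplying by 1.0.
  std::printf("%d\n", MultiplySingleRounding(1000, half_q31, 1));  // 1000
  return 0;
}
```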
@@ -0,0 +1,124 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_

#include <algorithm>
#include <cmath>
#include <cstdint>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"

#if defined(_MSC_VER)
#define __restrict__ __restrict
#endif

namespace tflite {

namespace tensor_utils {

// Multiplies a matrix with a scalar and reduce the result on each row to a
// scalar.
// Parameters:
//     - matrix: matrix of size n_row * n_col
//     - scalar: the scalar that is multiplied to each element in the matrix
//     - n_row: the row count of the matrix
//     - n_col: the column count of the matrix
//     - output: the 32bit output
// Note: We do not need saturation because the int8 * int8 is safe from overflow
// in (2^31-1) / (2^14) = 131072, which is bigger than the n_row. Non-zero
// initial output value is not exceptionally large.
void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar,
                                    int32_t n_row, int32_t n_col,
                                    int32_t* output);

// Add another vector for each batch in the batch vector.
template <typename T>
void VectorBatchVectorAdd(const T* vector, int v_size, int n_batch,
                          T* batch_vector) {
  for (int b = 0; b < n_batch; b++) {
    for (int i = 0; i < v_size; ++i) {
      batch_vector[i] += vector[i];
    }
    batch_vector += v_size;
  }
}

// Cwise product of two vectors.
template <typename T>
inline void VectorVectorCwiseProduct(const T* __restrict__ vector1,
                                     const T* __restrict__ vector2, int v_size,
                                     T* __restrict__ result) {
  for (int v = 0; v < v_size; v++) {
    *result++ = *vector1++ * *vector2++;
  }
}

// Cwise product of a vector and a batch-vector.
template <typename T>
inline void VectorBatchVectorCwiseProduct(const T* vector, int v_size,
                                          const T* batch_vector, int n_batch,
                                          T* result) {
  for (int b = 0; b < n_batch; b++) {
    VectorVectorCwiseProduct(vector, batch_vector, v_size, result);
    // Update the pointers.
    result += v_size;
    batch_vector += v_size;
  }
}

// Cwise product and accumulate of two vectors. Since it's a MAC operation, the
// assumption here is that result array is initialized to valid values.
template <typename T>
inline void VectorVectorCwiseProductAccumulate(const T* __restrict__ vector1,
                                               const T* __restrict__ vector2,
                                               int v_size,
                                               T* __restrict__ result) {
  for (int v = 0; v < v_size; v++) {
    *result++ += *vector1++ * *vector2++;
  }
}

// Cwise product and accumulate of a vector and a batch-vector. Since it's a MAC
// operation, the assumption here is that result array is initialized to valid
// values.
template <typename T>
inline void VectorBatchVectorCwiseProductAccumulate(const T* vector, int v_size,
                                                    const T* batch_vector,
                                                    int n_batch, T* result) {
  for (int b = 0; b < n_batch; b++) {
    VectorVectorCwiseProductAccumulate(vector, batch_vector, v_size, result);
    // Update the pointers.
    result += v_size;
    batch_vector += v_size;
  }
}

// Batch vector initialization with another vector.
template <typename T>
void VectorBatchVectorAssign(const T* vector, int v_size, int n_batch,
                             T* batch_vector) {
  for (int b = 0; b < n_batch; b++) {
    std::copy_n(vector, v_size, batch_vector + b * v_size);
  }
}

}  // namespace tensor_utils

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_
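A brief usage illustration of the batch helpers declared above; the include path is inferred from the header guard, and the sizes and values are made up. Note that the *Accumulate variants add into `result`, so it must be initialized beforehand.

```cpp
#include <cstdio>

#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"

int main() {
  const float vector[3] = {1.0f, 2.0f, 3.0f};
  // Two batches of three elements each.
  float batch[6] = {10.0f, 10.0f, 10.0f, 20.0f, 20.0f, 20.0f};
  float result[6] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f};

  // Computes result[b*3 + i] += vector[i] * batch[b*3 + i].
  tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
      vector, /*v_size=*/3, batch, /*n_batch=*/2, result);

  for (float r : result) std::printf("%.1f ", r);  // 11 21 31 21 41 61
  std::printf("\n");
  return 0;
}
```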
@@ -52,6 +52,11 @@ constexpr uint32_t kFractionRoundingThreshold = 0x00200000;

void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
                        int* shift) {
#if TFLITE_SINGLE_ROUNDING
  // Single-rounding MultiplyByQuantizedMultiplier only supports positive
  // multipliers.
  // TFLITE_DCHECK(double_multiplier >= 0);
#endif
  if (double_multiplier == 0.) {
    *quantized_multiplier = 0;
    *shift = 0;
@@ -65,10 +70,10 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
  int64_t q_fixed = IntegerFrExp(double_multiplier, shift);
#else  // TFLITE_EMULATE_FLOAT
  const double q = std::frexp(double_multiplier, shift);
  auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1ll << 31)));
  auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1LL << 31)));
#endif  // TFLITE_EMULATE_FLOAT
  TFLITE_CHECK(q_fixed <= (1ll << 31));
  if (q_fixed == (1ll << 31)) {
  TFLITE_CHECK(q_fixed <= (1LL << 31));
  if (q_fixed == (1LL << 31)) {
    q_fixed /= 2;
    ++*shift;
  }
@@ -87,6 +92,14 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
    *shift = 0;
    q_fixed = 0;
  }
#if TFLITE_SINGLE_ROUNDING
  // Single-rounding MultiplyByQuantizedMultiplier doesn't support a shift > 30,
  // saturate it.
  if (*shift > 30) {
    *shift = 30;
    q_fixed = (1LL << 31) - 1;
  }
#endif
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}

@@ -278,6 +291,12 @@ void PreprocessSoftmaxScaling(double beta, double input_scale,
  // result is double equivalent of Q0.31 (actually with more precision). Thus
  // this generates a Q(input_integer_bits).(31-input_integer_bits)
  // representation.
#if TFLITE_SINGLE_ROUNDING
  const double max_real_multiplier = (1LL << 30) - 1.0;
#else
  const double max_real_multiplier = (1LL << 31) - 1.0;
#endif

#ifdef TFLITE_EMULATE_FLOAT
  const double input_beta = IntegerDoubleMultiply(beta, input_scale);
  int shift;
@@ -285,12 +304,14 @@ void PreprocessSoftmaxScaling(double beta, double input_scale,
  shift += (31 - input_integer_bits);
  double input_beta_real_multiplier =
      DoubleFromFractionAndShift(fraction, shift);
  if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) {
    input_beta_real_multiplier = (1ll << 31) - 1.0;
  if (IntegerDoubleCompare(input_beta_real_multiplier, max_real_multiplier) >
      0) {
    input_beta_real_multiplier = max_real_multiplier;
  }
#else  // TFLITE_EMULATE_FLOAT
  const double input_beta_real_multiplier = std::min<double>(
      beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0);
  const double input_beta_real_multiplier =
      std::min<double>(beta * input_scale * (1 << (31 - input_integer_bits)),
                       max_real_multiplier);
#endif  // TFLITE_EMULATE_FLOAT

  QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
@@ -324,8 +345,8 @@ int CalculateInputRadius(int input_integer_bits, int input_left_shift,
#else  // TFLITE_EMULATE_FLOAT
  const double max_input_rescaled =
      1.0 * ((1 << input_integer_bits) - 1) *
      (1ll << (total_signed_bits - input_integer_bits)) /
      (1ll << input_left_shift);
      (1LL << (total_signed_bits - input_integer_bits)) /
      (1LL << input_left_shift);
  // Tighten bound using floor. Suppose that we could use the exact value.
  // After scaling the difference, the result would be at the maximum. Thus we
  // must ensure that our value has lower magnitude.
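For reference, QuantizeMultiplier splits a real scale into a Q31 mantissa and a power-of-two shift via frexp. The following standalone sketch mirrors the non-emulated branch above (with TfLiteRound replaced by std::round and the shift > 30 saturation omitted):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Mirrors the std::frexp branch of QuantizeMultiplier: real = q * 2^shift
// with q in [0.5, 1), stored as round(q * 2^31).
void QuantizeMultiplierSketch(double real_multiplier, int32_t* quantized,
                              int* shift) {
  if (real_multiplier == 0.) {
    *quantized = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(real_multiplier, shift);
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // q was rounded up to 1.0
    q_fixed /= 2;
    ++*shift;
  }
  *quantized = static_cast<int32_t>(q_fixed);
}

int main() {
  int32_t quantized;
  int shift;
  // 0.75 = 0.75 * 2^0 -> quantized = round(0.75 * 2^31) = 1610612736, shift = 0
  QuantizeMultiplierSketch(0.75, &quantized, &shift);
  std::printf("quantized=%d shift=%d\n", quantized, shift);
  // 3.0 = 0.75 * 2^2 -> same mantissa, shift = 2
  QuantizeMultiplierSketch(3.0, &quantized, &shift);
  std::printf("quantized=%d shift=%d\n", quantized, shift);
  return 0;
}
```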
@@ -43,16 +43,27 @@ inline void BroadcastBinaryFunction4DSlow(
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);

  const int* dims_data =
      reinterpret_cast<const int*>(output_shape.DimsDataUpTo5D());
  for (int b = 0; b < output_shape.Dims(0); ++b) {
    int out_idx_b = b * dims_data[1];
    int in_idx1_b = desc1.strides[0] * b;
    int in_idx2_b = desc2.strides[0] * b;
    for (int y = 0; y < output_shape.Dims(1); ++y) {
      int out_idx_y = (out_idx_b + y) * dims_data[2];
      int in_idx1_y = in_idx1_b + desc1.strides[1] * y;
      int in_idx2_y = in_idx2_b + desc2.strides[1] * y;
      for (int x = 0; x < output_shape.Dims(2); ++x) {
        int out_idx_x = (out_idx_y + x) * dims_data[3];
        int in1_idx = in_idx1_y + desc1.strides[2] * x;
        int in2_idx = in_idx2_y + desc2.strides[2] * x;
        for (int c = 0; c < output_shape.Dims(3); ++c) {
          auto out_idx = Offset(output_shape, b, y, x, c);
          auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
          auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
          auto out_idx = out_idx_x + c;
          auto in1_val = input1_data[in1_idx];
          auto in2_val = input2_data[in2_idx];
          output_data[out_idx] = func(in1_val, in2_val);
          in1_idx += desc1.strides[3];
          in2_idx += desc2.strides[3];
        }
      }
    }
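The rewrite above replaces the per-element Offset()/SubscriptToIndex() calls with incrementally maintained flat indices. The output index it builds is the usual row-major flattening, offset(b, y, x, c) = ((b*D1 + y)*D2 + x)*D3 + c, which is exactly the value the inner loop reaches as out_idx_x + c. A tiny check of that identity (the example shape is arbitrary):

```cpp
#include <cassert>

// Row-major offset for a 4-D tensor, the closed form the loop nest above
// computes incrementally via out_idx_b / out_idx_y / out_idx_x.
int FlatOffset(const int dims[4], int b, int y, int x, int c) {
  return ((b * dims[1] + y) * dims[2] + x) * dims[3] + c;
}

int main() {
  const int dims[4] = {2, 3, 4, 5};  // arbitrary example shape
  int expected = 0;                  // row-major order visits offsets 0, 1, 2, ...
  for (int b = 0; b < dims[0]; ++b)
    for (int y = 0; y < dims[1]; ++y)
      for (int x = 0; x < dims[2]; ++x)
        for (int c = 0; c < dims[3]; ++c)
          assert(FlatOffset(dims, b, y, x, c) == expected++);
  return 0;
}
```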
@@ -68,6 +68,27 @@ inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier,
  return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}

// Single-rounding MultiplyByQuantizedMultiplier
#if TFLITE_SINGLE_ROUNDING
template <>
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
    int32_t x, int32_t quantized_multiplier, int shift) {
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  int left_shift = shift > 0 ? shift : 0;
  int right_shift = shift > 0 ? 0 : -shift;
  return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
                                 x * (1 << left_shift), quantized_multiplier),
                             right_shift);
}

template <>
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
    int32_t x, int32_t quantized_multiplier, int shift) {
  return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}
// Double-rounding MultiplyByQuantizedMultiplier
#else
template <>
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
    int32_t x, int32_t quantized_multiplier, int shift) {
@@ -86,6 +107,7 @@ inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
                 rounding_offset) >>
             right_shift;
}
#endif  // TFLITE_SINGLE_ROUNDING

template <DepthwiseConvOutputRounding output_rounding>
struct DepthwiseConvBasicKernel {
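The two DepthwiseConvOutputRounding specializations above differ in how ties are rounded when dividing by a power of two: kAwayFromZero follows the gemmlowp double-rounding behavior, while kUpward uses the add-offset-then-shift of MultiplyByQuantizedMultiplier. A simplified, self-contained illustration of that difference (these are not the library's actual helpers):

```cpp
#include <cstdint>
#include <cstdio>

// Round-to-nearest division by 2^exponent with ties away from zero,
// matching the behavior named kAwayFromZero above.
int32_t DivideByPOTAwayFromZero(int32_t x, int exponent) {
  const int32_t mask = (1 << exponent) - 1;
  const int32_t remainder = x & mask;
  const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

// Add the rounding offset first, then shift: ties round toward +infinity,
// matching the behavior named kUpward above.
int32_t DivideByPOTUpward(int32_t x, int exponent) {
  return (x + (1 << (exponent - 1))) >> exponent;
}

int main() {
  // -3 / 2 = -1.5: one mode rounds the tie away from zero (-2),
  // the other rounds it upward (-1).
  std::printf("%d %d\n", DivideByPOTAwayFromZero(-3, 1),
              DivideByPOTUpward(-3, 1));
  return 0;
}
```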
Some files were not shown because too many files have changed in this diff.