Compare commits


155 Commits

Author SHA1 Message Date
jomjol
19ca0d7dd7 Update Versioninfo 2021-09-12 07:33:30 +02:00
jomjol
7fcb5d1c0c v8.3.0 2021-09-12 07:29:30 +02:00
jomjol
dd995ec28a Rolling 20210910 2021-09-10 09:26:52 +02:00
jomjol
af99de3535 IgnoreLeadingNaN 2021-09-02 11:04:01 +02:00
jomjol
3567cc2fb0 Merge pull request #330 from pixeldoc2000/pixeldoc2000-patch-1
Pixeldoc2000 patch 1
2021-09-02 11:00:49 +02:00
pixel::doc
5e9d9bd264 Update edit_config_param.html
Fixed some more Text.
2021-09-02 10:09:36 +02:00
pixel::doc
62447c1bb9 Update edit_config_param.html
Fixed some Text
2021-09-02 00:23:59 +02:00
jomjol
a86434c9a2 Rolling 20210831 2021-08-31 11:40:29 +02:00
jomjol
b7b70299f7 Rolling 20210830 2021-08-30 21:21:18 +02:00
jomjol
eb02e0aec1 new images 2021-08-29 20:57:21 +02:00
jomjol
7816e53db7 v8.2.0 2021-08-24 08:37:18 +02:00
jomjol
7ae08e572a Merge branch 'rolling' 2021-08-24 08:34:32 +02:00
jomjol
47d15d8adb v8.2.0 2021-08-24 08:33:56 +02:00
jomjol
0dac0e87e4 rolling 20210823 2021-08-23 18:57:48 +02:00
jomjol
b290099d5b v12.0.0 2021-08-12 07:25:12 +02:00
jomjol
f6b1a41a0b v12.0.0 2021-08-12 07:20:58 +02:00
jomjol
e529af04cf Update FeatureRequest.md 2021-08-10 20:19:05 +02:00
jomjol
6c365dd949 rolling v20210809 2021-08-09 21:53:07 +02:00
jomjol
32f15fc557 rolling 20210708 2021-08-07 15:25:27 +02:00
jomjol
6f06af1d5f Update README.md 2021-08-01 21:52:00 +02:00
jomjol
a91f99faab update 2021-08-01 21:49:29 +02:00
jomjol
17a87b23a1 v8.0.5 2021-08-01 21:46:17 +02:00
jomjol
d4b5ec2ae2 v8.0.4 2021-07-29 20:18:53 +02:00
jomjol
1bcaf09855 v8.0.4 2021-07-29 20:14:36 +02:00
jomjol
fa3842b2b4 v8.0.3 2021-07-25 18:15:35 +02:00
jomjol
ea72256e56 Merge branch 'rolling' 2021-07-25 18:08:43 +02:00
jomjol
be5828cb3e v8.0.3 2021-07-25 18:07:50 +02:00
jomjol
104b72505c Rolling - 20210725 2021-07-25 13:44:22 +02:00
jomjol
23728a0686 Update README.md 2021-07-23 21:02:19 +02:00
jomjol
eaaa856b13 Update README.md 2021-07-23 21:01:50 +02:00
jomjol
01e81d02b5 v8.0.2 2021-07-23 20:57:42 +02:00
jomjol
9ae8d0a512 Merge branch 'rolling' 2021-07-23 20:48:28 +02:00
jomjol
da16322fb8 v8.0.2 2021-07-23 20:47:23 +02:00
jomjol
a6d39afc26 rolling 20210723 2021-07-23 07:44:59 +02:00
jomjol
1b6a124f54 Update FeatureRequest.md 2021-07-21 07:00:12 +02:00
jomjol
8aff6bf8f3 Rolling 20210719 2021-07-19 21:54:14 +02:00
jomjol
21115752aa Merge pull request #280 from jomjol/rolling
v8.0.1
2021-07-18 16:58:23 +02:00
jomjol
025c2b88b9 v8.0.1 2021-07-18 16:57:35 +02:00
jomjol
655f9d7c97 Update version 2021-07-14 20:00:06 +02:00
jomjol
03b5e36114 Merge branch 'rolling' 2021-07-14 19:52:11 +02:00
jomjol
9c78c1e9ca v8.0.0 2021-07-14 19:48:49 +02:00
jomjol
136186a526 rolling 20210713 BETA v8.0.0 2021-07-13 21:41:17 +02:00
jomjol
1f6b02a671 Rolling 20210712 2021-07-12 22:06:32 +02:00
jomjol
76a0518d52 Rolling 20210721 BugFix 2021-07-11 23:47:24 +02:00
jomjol
a688a69af6 Merge branch 'rolling' of https://github.com/jomjol/AI-on-the-edge-device into rolling 2021-07-11 18:28:00 +02:00
jomjol
b890ffc5f3 Rolling 20210711 2021-07-11 18:27:25 +02:00
jomjol
b98558107e Merge pull request #263 from Zwer2k/rolling
Rolling
2021-07-11 17:51:09 +02:00
Zwer2k
3e360ad0fb add log in gpio handler
add nullpint check in tfliteCass
2021-07-11 16:19:45 +02:00
Zwer2k
a7ced407f8 Update README.md
improve case if gpio is disabled
remove /test html route
2021-07-11 13:56:37 +02:00
Zwer2k
45a1e137d1 Merge branch 'rolling' of https://github.com/jomjol/AI-on-the-edge-device into rolling 2021-07-11 11:24:26 +02:00
Zwer2k
a44e0d81cc gpio handler work 2021-07-10 12:36:13 +02:00
Zwer2k
749fc6699c Merge remote-tracking branch 'origin/gpio-handler' into rolling 2021-07-08 21:54:44 +02:00
jomjol
d7bb147a23 Rolling 20210708 2021-07-08 21:45:33 +02:00
jomjol
08b0b254f2 rolling 20210707 2021-07-07 21:57:59 +02:00
Zwer2k
5414a4c3f1 Merge remote-tracking branch 'upstream/rolling' into rolling 2021-07-06 23:33:01 +02:00
Zwer2k
7944ab329d litle improvements on html and config.ini 2021-07-06 21:32:49 +02:00
Zwer2k
8ca14a434c gpio handler works again
remove memory leak in FlowDigit
2021-07-06 01:25:20 +02:00
jomjol
e24ba68fec rolling 20210705 2021-07-05 07:30:04 +02:00
Zwer2k
b205326782 gpio handler is working 2021-07-04 23:59:59 +02:00
jomjol
daa1960dff rolling 20210704 bug fix 2021-07-04 10:20:07 +02:00
jomjol
894c7f6972 Revert "Revert "rolling 20210704 bugfix""
This reverts commit 737dcc76b8.
2021-07-04 08:12:50 +02:00
jomjol
737dcc76b8 Revert "rolling 20210704 bugfix"
This reverts commit b42f17916b.
2021-07-04 08:12:36 +02:00
jomjol
b42f17916b rolling 20210704 bugfix 2021-07-04 08:06:04 +02:00
jomjol
2c6ce6fd07 rolling 20210705 2021-07-04 07:54:17 +02:00
jomjol
f243f4b8ea Rolling 20210701 2021-07-01 19:47:44 +02:00
jomjol
02e881ebc0 Add files via upload 2021-07-01 19:15:36 +02:00
Zwer2k
7b8f10a14e work on GPIO handler
bigfix: memory leak in GetJPGStream
2021-06-25 01:19:23 +02:00
Zwer2k
d995c31b7b work on GPIO handler
bigfix: memory leak in GetJPGStream
2021-06-18 01:29:59 +02:00
jomjol
45154cb55c 20210617 2021-06-17 20:28:24 +02:00
jomjol
48067b10cd v7.1.2 2021-06-17 20:10:30 +02:00
Zwer2k
f24c40d780 work on GPIO handling 2021-06-13 01:24:02 +02:00
jomjol
f4edd36744 Update README.md 2021-06-11 07:41:06 +02:00
jomjol
a202a6abdc Rolling 20210611 2021-06-11 07:31:06 +02:00
Zwer2k
c25adfe28a add interrupt to gpio config
bugfix in gpio config
2021-06-09 00:16:31 +02:00
Zwer2k
822c6cc45c complete extended config.ini handling for GPIO settings
added TopicUptime and MainTopicGPIO
2021-06-08 22:24:53 +02:00
Zwer2k
c48b44d06a extend config.ini handler for GPIO settings
add BootTime incode
2021-06-08 00:59:28 +02:00
jomjol
21a59fbd35 Update README.md 2021-05-30 21:52:52 +02:00
jomjol
cdcf940d12 v7.1.1 2021-05-30 21:49:50 +02:00
jomjol
6cefc44fb6 Update README.md 2021-05-28 20:56:24 +02:00
jomjol
8308f159ad Merge pull request #237 from jomjol/master
Sync Rolling
2021-05-28 19:57:20 +02:00
jomjol
e5ff8f2164 Update Firmware 2021-05-28 19:56:10 +02:00
jomjol
a000252c8a Merge pull request #236 from jomjol/rolling
Update to v7.1.0
2021-05-28 19:53:08 +02:00
jomjol
9a42c580cf v7.1.0 2021-05-28 19:52:26 +02:00
jomjol
6e0a7a742e Rolling 210527 2021-05-27 19:22:27 +02:00
jomjol
026bac121f Rolling 20210522-v2 2021-05-22 11:37:46 +02:00
jomjol
8a26b817f7 Rolling 20210522 2021-05-22 07:39:10 +02:00
jomjol
528a4435a9 Rolling 20210520 2021-05-20 21:58:37 +02:00
jomjol
9b791bb7a7 rolling 20210520 2021-05-20 07:00:09 +02:00
jomjol
58eb0b1292 rolling 20210517 2021-05-17 19:35:38 +02:00
jomjol
39eda4a4be Merge pull request #224 from jomjol/master
sync rolling
2021-05-13 08:23:03 +02:00
jomjol
87a6445ff6 Update version in firmware 2021-05-13 08:22:20 +02:00
jomjol
b7e6d33d48 Merge pull request #223 from jomjol/rolling
Update v7.0.1
2021-05-13 08:17:03 +02:00
jomjol
52e9cd20ee Update v7.0.1 2021-05-13 08:16:01 +02:00
jomjol
b34bd5d988 Merge pull request #217 from jomjol/master
Synch rolling
2021-05-08 18:23:54 +02:00
jomjol
58d5e7bc58 v7.0.0 2021-05-08 18:23:10 +02:00
jomjol
1e09bfbb80 Merge pull request #216 from jomjol/rolling
Update to v7.0.0
2021-05-08 18:20:11 +02:00
jomjol
91fa1c066c Vorbereitung v7.0.0 2021-05-08 18:16:58 +02:00
jomjol
10d49b55d1 Rolling 20210507 2021-05-07 21:33:16 +02:00
jomjol
67d0bf6a27 Update config.ini 2021-05-06 21:54:38 +02:00
jomjol
d36cbde7aa Rolling 20210506v2 2021-05-06 21:50:14 +02:00
jomjol
016f4088d4 Rolling 20210506 2021-05-06 20:28:27 +02:00
jomjol
c2d1bbb4be Merge pull request #205 from jomjol/rolling
v6.7.2
2021-05-01 19:53:26 +02:00
jomjol
bc6a01444a v6.7.2 2021-05-01 19:52:10 +02:00
jomjol
24f0902194 Merge pull request #204 from jomjol/master
Sync Rolling
2021-05-01 17:47:08 +02:00
jomjol
19fd6a10dd v6.7.1 2021-05-01 17:44:56 +02:00
jomjol
a45a5296e4 Merge branch 'rolling' into master 2021-05-01 17:40:31 +02:00
jomjol
1459bb15c1 Prepare v6.7.1 2021-05-01 17:32:22 +02:00
jomjol
ce5f3c463b Rolling 2021-05-01 08:15:11 +02:00
jomjol
04ebbf35e7 Update README.md 2021-04-23 07:22:07 +02:00
jomjol
ba1d6e30e2 Update README.md 2021-04-23 07:15:16 +02:00
jomjol
e9ac8933f9 Update Version 2021-04-23 07:14:11 +02:00
jomjol
ec96b7f878 Merge pull request #194 from jomjol/rolling
Update to v6.7.0
2021-04-23 07:10:11 +02:00
jomjol
ba7d429178 v6.7.0 2021-04-23 07:08:18 +02:00
jomjol
79be2089be Prepare to v6.7.0 2021-04-23 07:04:05 +02:00
jomjol
ea2305de47 Rolling 20210420 2021-04-20 19:44:16 +02:00
jomjol
635b2c35a8 Update README.md 2021-04-08 10:42:42 +02:00
jomjol
afdc4bb3f1 Merge pull request #179 from queeek/patch-1
Update README.md
2021-04-08 10:38:53 +02:00
Ina
3d49ec72ba Update README.md
3D Housing link is linked to the housing only project
2021-04-08 10:07:21 +02:00
jomjol
520f818adc Merge pull request #176 from jomjol/master
Sync Rolling
2021-04-05 10:18:36 +02:00
jomjol
20b054472e Update 2021-04-05 10:17:54 +02:00
jomjol
21a70c5655 Merge pull request #175 from jomjol/rolling
Update to v6.6.1
2021-04-05 10:12:59 +02:00
jomjol
08270f5d6d Update to v6.6.1 2021-04-05 10:12:21 +02:00
jomjol
9923be2f1d Merge pull request #171 from jomjol/master
Sync Rolling
2021-03-28 20:12:17 +02:00
jomjol
5df57c95d4 Update to v6.6.0 2021-03-28 20:11:22 +02:00
jomjol
d8c91466d0 Merge pull request #170 from jomjol/rolling
Update to v6.6.0
2021-03-28 20:08:46 +02:00
jomjol
37b2e370fe Prepare v6.6.0 2021-03-28 20:08:02 +02:00
jomjol
98dfba0640 Rolling 20210327 2021-03-27 17:16:54 +01:00
jomjol
574c9084c2 Merge pull request #166 from jomjol/master
Sync Rolling
2021-03-25 21:01:25 +01:00
jomjol
9862ae8e7a Update to v6.5.0 2021-03-25 20:58:44 +01:00
jomjol
7bc4e63209 Merge pull request #165 from jomjol/rolling
Update to v6.5.0
2021-03-25 20:51:30 +01:00
jomjol
ad40150cfa Update 2021-03-25 20:50:10 +01:00
jomjol
970530d99f Update Rolling to v6.5.0 2021-03-25 20:48:19 +01:00
jomjol
c6ae989b82 Update Restart Hostname 2021-03-21 20:56:30 +01:00
jomjol
a0ebf354b1 Create focus_adjustment.jpg 2021-03-21 19:52:23 +01:00
jomjol
97ecbc792e Create focus_adjustment.jpg 2021-03-21 19:50:01 +01:00
jomjol
5934a59489 Update 2021-03-21 19:41:34 +01:00
jomjol
ee18046581 Rolling 20210321 2021-03-21 19:24:20 +01:00
jomjol
1e4e38c02f Merge pull request #158 from jomjol/master
Update rolling to v6.4.0
2021-03-21 12:56:08 +01:00
jomjol
7a3038eceb Update FeatureRequest.md 2021-03-21 12:49:23 +01:00
jomjol
7d2f86b72e Update README.md 2021-03-21 12:45:52 +01:00
jomjol
3aaa319505 Update README.md 2021-03-21 12:45:03 +01:00
jomjol
f4075f0a51 Create FeatureRequest.md 2021-03-21 12:39:28 +01:00
jomjol
59643a8d52 Update README.md 2021-03-21 11:44:46 +01:00
jomjol
baf2a880e4 Merge pull request #157 from jomjol/master
Align Rolling to v6.4.0
2021-03-20 09:49:22 +01:00
jomjol
d71e8320c7 Update to v6.4.0 2021-03-20 09:47:50 +01:00
jomjol
3b3d924f40 final update 2021-03-16 21:14:09 +01:00
jomjol
60701bc007 Merge branch 'rolling' into master 2021-03-16 21:10:07 +01:00
jomjol
5ca3e184e0 Prepare v6.3.1 2021-03-16 21:02:27 +01:00
jomjol
2903d1a0a6 Update 2021-03-14 12:59:14 +01:00
jomjol
5f0f1802a4 Merge pull request #150 from jomjol/rolling
Rolling
2021-03-14 12:55:44 +01:00
jomjol
5be56d9b00 Prepare for 6.3.0 2021-03-14 12:52:23 +01:00
jomjol
d3fd1b5045 Rollling 20210313 2021-03-13 17:48:12 +01:00
jomjol
4615e87483 Merge pull request #147 from jomjol/rolling
Rolling
2021-03-10 21:16:41 +01:00
jomjol
fb9b72deea v6.2.2 2021-03-10 21:15:47 +01:00
jomjol
5cc873a6bb Merge pull request #141 from jomjol/master
Synch Master to Rolling
2021-03-09 21:11:05 +01:00
303 changed files with 24883 additions and 12770 deletions


@@ -1,6 +1,238 @@
# Versions
##### 7.1.2 MQTT-Update - (2021-06-17)
* NEW: 7.1.2: bug fix setting hostname, Flash-LED not off during reboot
* NEW: 7.1.1: bug fix wlan password with "=" (again)
* MQTT error message: changed to "no error", sent with retain flag
* Update wlan handling to esp-idf 4.1
* Upgrade digital CNN to v8.7.0 (added new images)
* Bug fix: MQTT, WLAN, LED control, GPIO usage, fixed IP, flow rate calculation
##### 7.0.1 MQTT-Update - (2021-05-13)
* NEW: 7.0.1: bug fix wlan password with "="
* Upgrade digital CNN to v8.5.0 (added new images)
* New MQTT topics: flow rate (units/minute), time stamp (of the last correct readout)
* Update MQTT/Error topic to " " in case of no error (instead of empty string)
* Portrait or landscape image orientation in rotated image (avoid cropping)
##### 6.7.2 Image Processing in Memory - (2021-05-01)
* NEW 6.7.2: Updated html for setup mode (removed reboot on editing the configuration)
* NEW 6.7.1: Improved stability of camera (back to v6.6.1) - removes black strips and areas
* Upgrade digital CNN to v8.3.0 (added new type of digits)
* Internal update: TFlite (v2.5), esp32cam, startup sequence
* Rollback to espressif v2.1.0, as v3.2.0 shows unstable reboot
* Bugfix: WLan-passwords, reset of hostname
##### 6.6.1 Image Processing in Memory - (2021-04-05)
* NEW 6.6.1: failed SD card initialization indicated by fast blinking LED at startup
* Improved SD-card handling (increases compatibility with more types of cards)
##### 6.5.0 Image Processing in Memory - (2021-03-25)
* Upgrade digital CNN to v8.2.0 (added new type of digits)
* Supporting alignment structures in ROI definition
* Bug fixing: definition of hostname in `config.ini`
##### 6.4.0 Image Processing in Memory - (2021-03-20)
* Additional alignment marks for setting the ROIs (analog and digital)
* Upgrade analog CNN to v7.0.0 (added new type of pointer)
##### 6.3.1 Image Processing in Memory - (2021-03-16)
* NEW: 6.3.1: bug fix in the initial edit of the reference image and `config.ini` (spelling error in `InitialRotate`)
* Initial setup mode: bug fixing, error correction
* Bug-fixing
##### 6.2.2 Image Processing in Memory - (2021-03-10)
* NEW 6.2.2: bug fixing
* NEW 6.2.1: Changed brightness and contrast to default if not enabled (resolves too-bright images)
* Determination of fixed illumination settings during startup - speed up of 5s in each run
* Update digital CNN to v8.1.1 (additional digital images trained)
* Extended error details in the MQTT error message
* Image brightness is now adjustable
* Bug fixing: minor topics
##### 6.1.0 Image Processing in Memory - (2021-01-20)
* Disabling of analog / digital counters in configuration
* Improved Alignment Algorithm (`AlignmentAlgo` = `Default`, `Accurate`, `Fast`)
* Analog counters: `ExtendedResolution` (the last digit is extended by the CNN's sub-comma value)
* `config.ini`: additional parameter `hostname` (in addition to wlan.ini)
* Switching of GPIO12/13 via http-interface: `/GPIO?GPIO=12&Status=high/low` (see the sketch after this list)
* Bug fixing: html configuration page, wlan password ("=" now possible)
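
The changelog only names the `/GPIO?GPIO=12&Status=high/low` route. Purely as an illustration (this is not the project's actual handler; the handler name and responses are made up), such a route could be served with ESP-IDF's `esp_http_server` and GPIO driver roughly like this:

```cpp
// Hypothetical sketch only - handler name and response strings are illustrative.
#include <cstdlib>
#include <cstring>
#include "esp_http_server.h"
#include "driver/gpio.h"

static esp_err_t gpio_get_handler(httpd_req_t *req)
{
    char query[64] = "", gpio_str[8] = "", status[8] = "";

    // Extract "GPIO" and "Status" from a query string like "GPIO=12&Status=high".
    if (httpd_req_get_url_query_str(req, query, sizeof(query)) != ESP_OK ||
        httpd_query_key_value(query, "GPIO", gpio_str, sizeof(gpio_str)) != ESP_OK ||
        httpd_query_key_value(query, "Status", status, sizeof(status)) != ESP_OK)
    {
        httpd_resp_sendstr(req, "usage: /GPIO?GPIO=12&Status=high|low");
        return ESP_OK;
    }

    gpio_num_t pin = (gpio_num_t) atoi(gpio_str);
    int level = (strcmp(status, "high") == 0) ? 1 : 0;

    gpio_set_direction(pin, GPIO_MODE_OUTPUT);   // drive the requested pin
    gpio_set_level(pin, level);

    httpd_resp_sendstr(req, level ? "high" : "low");
    return ESP_OK;
}

// Registered on an already running server handle, e.g.:
//   httpd_uri_t uri = { .uri = "/GPIO", .method = HTTP_GET,
//                       .handler = gpio_get_handler, .user_ctx = NULL };
//   httpd_register_uri_handler(server, &uri);
```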
##### 6.0.0 Image Processing in Memory - (2021-01-02)
* **Major change**: image processing fully in memory - no need of SD card buffer anymore
* Need to limit camera resolution to VGA (due to memory limits)
* MQTT: Last Will Testament (LWT) implemented: "connection lost" is published to `TopicError` if the connection drops (see the sketch after this list)
* Disabled `CheckDigitIncreaseConsistency` in default configuration - must now be explicitly enabled if needed
* Update digital CNN to v7.2.1 (additional digital images trained)
* Setting of arbitrary time server in `config.ini`
* Option for fixed IP-, DNS-Settings in `wlan.ini`
* Increased stability (internal image and camera handling)
* Bug fixing: edit digits, handling PreValue, html-bugs
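
For illustration only, a Last Will like the one described above can be registered when the MQTT client is created. This is a minimal sketch assuming the ESP-IDF 4.x esp-mqtt client (flat `lwt_*` config fields); the URI and topic are placeholders, not the project's configuration:

```cpp
// Sketch, not project code: register "connection lost" as LWT on the error topic.
#include "mqtt_client.h"

esp_mqtt_client_handle_t start_mqtt_with_lwt(const char *uri, const char *topic_error)
{
    esp_mqtt_client_config_t cfg = {};
    cfg.uri        = uri;                 // e.g. "mqtt://192.168.178.10" (placeholder)
    cfg.lwt_topic  = topic_error;         // the configured TopicError
    cfg.lwt_msg    = "connection lost";   // published by the broker if the client disappears
    cfg.lwt_qos    = 1;
    cfg.lwt_retain = 1;

    esp_mqtt_client_handle_t client = esp_mqtt_client_init(&cfg);
    esp_mqtt_client_start(client);
    return client;
}
```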
##### 5.0.0 Setup Modus - (2020-12-06)
* Implementation of an initial setup mode for fresh installations
* Code restructuring (full compatibility between pure ESP-IDF and Platformio w/ espressif)
##### 4.1.1 Configuration editor - (2020-12-02)
* Bug fixing: internal improvement of file handling (reduces unresponsiveness)
##### 4.1.0 Configuration editor - (2020-11-30)
* Implementation of configuration editor (including basic and expert mode)
* Adjustable time zone to adjust to local time setting (incl. daylight saving time)
* MQTT: additional topic for error reporting
* standardized access to current logfile via `http://IP-ADRESS/logfileact`
* Update digital CNN to v7.2.0, analog CNN to 6.3.0
* Bug fixing: truncation error, CheckDigitConsistency & PreValue implementation
##### 4.0.0 Tflite Core - (2020-11-15)
* Implementation of rolling log-files
* Update Tflite-Core to master@20201108 (v2.4)
* Bug-fixing for reducing reboots
##### 3.1.0 MQTT-Client - (2020-10-26)
* Update digital CNN to v6.5.0 and HTML (Info to hostname, IP, ssid)
* New implementation of "checkDigitConsistency" also for digits
* MQTT-Adapter: user and password for signing in to the MQTT broker
##### 3.0.0 MQTT-Client (2020-10-14)
* Implementation of MQTT Client
* Improved Version Control
* bug-fixing
##### 2.2.1 Version Control (2020-09-27)
* Bug-Fixing (hostname in wlan.ini and error handling inside flow)
##### 2.2.0 Version Control (2020-09-27)
* Integrated automated versioning system (menu: SYSTEM --> INFO)
* Update Build-System to PlatformIO - Espressif 32 v2.0.0 (ESP-IDF 4.1)
##### 2.1.0 Decimal Shift, Chrome & Edge (2020-09-25)
* Implementation of Decimal Shift
* Update default CNN for digits to v6.4.0
* Improvement HTML
* Support for Chrome and Edge
* Reduce logging to minimum - extended logging on demand
* Implementation of hostname in wlan.ini (`hostname = "HOSTNAME"`)
* Bug fixing, code corrections
##### 2.0.0 Layout update (2020-09-12)
* Update to **new and modern layout**
* Support for Chrome improved
* Improved robustness: improved error handling in auto flow reduces spontaneous reboots
* File server: Option for "DELETE ALL"
* WLan: support of spaces in SSID and password
* Reference Image: Option for mirror image, option for image update on the fly
* additional parameter in `wasserzaehler.html?noerror=true` to suppress a potential error message
* bug fixing
##### 1.1.3 (2020-09-09)
* **Bug in configuration of analog ROIs corrected** - correction in v.1.0.2 did not work properly
* Improved update page for the web server (`/html` can be updated via a zip-file, which is provided in `/firmware/html.zip`)
* Improved Chrome support
##### 1.1.0 (2020-09-06)
* Implementation of "delete complete directory"
**Attention: besides `firmware.bin`, the content of `/html` also needs to be updated!**
##### 1.0.2 (2020-09-06)
* Bug in configuration of analog ROIs corrected
* minor bug correction
##### 1.0.1 (2020-09-05)
* preValue.ini Bug corrected
* minor bug correction
##### 1.0.0 (2020-09-04)
* **First usable version** - compatible with the previous project (https://github.com/jomjol/water-meter-system-complete)
* NEW:
* no docker container for CNN calculation necessary
* web based configuration editor on board
##### 0.1.0 (2020-08-07)
* Initial Version

FeatureRequest.md Normal file

@@ -0,0 +1,123 @@
## Feature Requests
**There are a lot of ideas for further improvements, but only limited capacity on the developer's side.** Therefore I have created this page as a collection of ideas.
1. Whoever has a new idea can put it here, so that it is not forgotten.
2. Whoever has the time, capacity and passion to contribute can take any of the ideas and implement them.
I will support and help wherever I can!
____
#### #10 Improve and bug fix logging of images
* https://github.com/jomjol/AI-on-the-edge-device/issues/307
#### #9 Basic auth for the UI
* https://github.com/jomjol/AI-on-the-edge-device/issues/283
* Implementation of an authentication mechanism.
#### #8 MQTT configurable readout interval
Make the readout interval configurable via MQTT (see the sketch below).
* Change the MQTT part to also receive and process input, not only send
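
A rough sketch of the idea only, using the esp-mqtt event API; the topic name and the interval variable are made up for illustration and are not existing project code:

```cpp
// Sketch only: react to a value published on a "set interval" topic.
#include <cstdlib>
#include <string>
#include "mqtt_client.h"

static volatile int g_readout_interval_min = 5;   // read by the flow task before each round

static void mqtt_event_handler(void *args, esp_event_base_t base,
                               int32_t event_id, void *event_data)
{
    esp_mqtt_event_handle_t event = (esp_mqtt_event_handle_t) event_data;

    if (event_id == MQTT_EVENT_CONNECTED) {
        // subscribe once the connection is up
        esp_mqtt_client_subscribe(event->client, "watermeter/set/interval", 1);
    }
    else if (event_id == MQTT_EVENT_DATA) {
        std::string topic(event->topic, event->topic_len);
        std::string payload(event->data, event->data_len);
        if (topic == "watermeter/set/interval") {
            int minutes = atoi(payload.c_str());
            if (minutes > 0)
                g_readout_interval_min = minutes;   // takes effect on the next round
        }
    }
}

// Hooked up with: esp_mqtt_client_register_event(client, MQTT_EVENT_ANY,
//                                                mqtt_event_handler, NULL);
```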
#### #7 Extended Error Handling
Check for different types of errors (e.g. tflite not available) and generate an error on the html page (a rough sketch follows below).
To do:
* Make a list of "important" errors
* Implement a checking algorithm
* Extend the firmware and html page for the error handling
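
As a sketch of the proposed direction only (all names are illustrative, nothing here exists in the firmware), the idea is to collect "important" errors centrally so that an html page or the MQTT error topic can report them:

```cpp
// Sketch only: central error list that a status page or the MQTT error topic could report.
#include <string>
#include <vector>

static std::vector<std::string> g_errors;

void ReportError(const std::string &msg)      // e.g. "tflite not available"
{
    g_errors.push_back(msg);
}

std::string GetErrorsAsHtml()                 // could back a hypothetical /error_list route
{
    std::string html = "<ul>";
    for (const auto &err : g_errors)
        html += "<li>" + err + "</li>";
    html += "</ul>";
    return html;
}
```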
#### ~~#6 Check for double ROI names~~ - implemented v8.0.0
~~Check during configuration, that ROI names are unique.~~
~~To do:~~
* ~~Implementation of ROI name checking in html code before saving analog or digital ROIs~~
#### #5 Configurable decimal separator (point or comma)
Make the decimal separator configurable for different systems (see the sketch below).
To do:
* Implementation of the decimal separator in the postprocessing module
* Extension of configuration
* Adaptation of the html configuration
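
A minimal sketch of the postprocessing part, assuming a hypothetical formatting helper (the function name and signature are made up):

```cpp
// Sketch only: format the recognized value with a configurable decimal separator.
#include <cstdio>
#include <string>

std::string FormatReadout(double value, int decimals, char separator /* '.' or ',' */)
{
    char buf[32];
    std::snprintf(buf, sizeof(buf), "%.*f", decimals, value);  // always yields '.'
    std::string out(buf);
    std::size_t pos = out.find('.');
    if (pos != std::string::npos)
        out[pos] = separator;   // swap in the configured separator
    return out;
}

// FormatReadout(123.456, 2, ',') -> "123,46"
```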
#### ~~#4 Initial Shifting and Rotation~~ - implemented v7.0.0
* ~~https://github.com/jomjol/AI-on-the-edge-device/issues/123~~
~~Implementation of a shifting in addition to the initial rotation of the raw camera input~~
~~To do:~~
* ~~Implementation of shifting~~
* ~~Extension of configuration~~
* ~~Adaption of the html configuration to implement shifting~~
#### ~~#3 Allow grouping of digits to multiple reading values~~ - implemented v8.0.0
* ~~https://github.com/jomjol/AI-on-the-edge-device/issues/123~~
~~Implementation of two different independent readouts in one setup~~
~~To do:~~
* ~~Extend the configuration, setting and processing flow for two independent readouts~~
____
#### #2 MQTT control with callback
* https://github.com/jomjol/AI-on-the-edge-device/issues/105
Extend the MQTT client to also enable callbacks for configuration setting
To do:
* implement a callback for receiving information and overriding `config.ini` settings
* change configuration management to handle online updates (currently changes need a restart)
* think about the startup, as the default config is loaded there
____
#### ~~#1 Optional GPIO for external flash/lighting~~ - implemented (v8.0.0)
* ~~https://github.com/jomjol/AI-on-the-edge-device/issues/133~~
~~Implementation of an external flash / lighting through GPIOs.~~
* ~~available GPIOs: 12 & 13 (currently in use for html switching)~~
~~To do:~~
* ~~Implementation of a software module for external light source (e.g. WS8132 LED controller, ...)~~
* ~~Update of the camera module to use the external light instead of the internal flash light~~
* ~~Adapt the configuration algorithm to a configurable light source~~

README.md

@@ -4,12 +4,16 @@ This is an example of Artificial Intelligence (AI) calculations on a very cheap
### Details on **function**, **installation** and **configuration** can be found on the **[Wiki Page](https://github.com/jomjol/AI-on-the-edge-device/wiki)**
A 3d-printable housing can be found here: https://www.thingiverse.com/thing:4571627
A 3d-printable housing can be found here: https://www.thingiverse.com/thing:4573481
or, for the ESP32-Cam housing only: https://www.thingiverse.com/thing:4571627
<img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/watermeter_all.jpg" width="200"><img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/main.jpg" width="200"><img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/size.png" width="200">
<img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/watermeter.jpg" width="600">
<img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/powermeter.jpg" width="600">
@@ -24,7 +28,9 @@ If you would like to support the developer with a cup of coffee you can do that
<input type="image" src="https://www.paypalobjects.com/en_US/DK/i/btn/btn_donateCC_LG.gif" border="0" name="submit" title="PayPal - The safer, easier way to pay online!" alt="Donate with PayPal button" />
<img alt="" border="0" src="https://www.paypal.com/en_DE/i/scr/pixel.gif" width="1" height="1" />
</form>
If you have any technical questions, you can file an issue in this repository.
In other cases you can contact the developer via email: <img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/mail.jpg" height="25">
## Change log
@@ -41,185 +47,76 @@ If you would like to support the developer with a cup of coffee you can do that
##### 6.2.1 Image Processing in Memory - (2021-03-08)
##### 8.3.0 - Multi Meter Support (2021-09-12)
* NEW 6.2.1: Changed brightness and contrast to default if not enabled (resolves to bright images)
* Determination of fixed illumination settings during startup - speed up of 5s in each run
* Update digital CNN to v8.1.1 (additional digital images trained)
* Extended error message in MQTT error message
* Upgrade digital CNN to v12.1.0 (added new images)
* Dedicated NaN handling, internal refactoring (CNN-Handling)
* HTML: confirmation after config.ini update
* Bug fixing
##### 8.2.0 - Multi Meter Support (2021-08-24)
* Improve server responsiveness
* Flow status and prevalue status in overview
* Improved prevalue handling
##### 8.1.0 - Multi Meter Support (2021-08-12)
* GPIO: using the general mqtt main topic for GPIO
* Upgrade digital CNN to v12.0.0 (added new images)
* Update tfmicro to new master (2021-08-07)
* Bug fix: remove text in mqtt value, remove connect limit in wlan reconnect
##### 8.0.5 - Multi Meter Support (2021-08-01)
* NEW 8.0.5: bug fix: saving prevalue
* NEW 8.0.4: bug fix: load config.ini after upgrade
* NEW 8.0.3: bug fix: reboot during `config.ini` handling, html error
* NEW 8.0.2: saving rounded prevalue, bug fix html server
* NEW 8.0.1: bug fix: html handling of parameter `FixedExposure` and `ImageSize`
* Dual / multi meter support (more than 1 number to be recognized)
This is implemented with the feature "number" on the ROI definition as well as selected options
* MQTT: standardization of the naming - including new topics (`json`, `freeMem`, `uptime`)
* Preparation for extended GPIO support (thanks to Zwerk2k) - not fully tested and functional yet
* Bug fixing: html server, memory leak, MQTT connect, hostname, turning off the flash LED
<span style="color: red;">**ATTENTION: the configuration and prevalue files are modified automatically and will not be backward compatible!**</span>
* Image brightness is now adjustable
## Additional ideas
There are some ideas and feature requests which are currently not being followed up - mainly due to limited capacity on the developer's side. They are collected here: [FeatureRequest.md](FeatureRequest.md)
* Bug fixing: minor topics
------
##### 6.1.0 Image Processing in Memory - (2021-01-20)
## History
* Disabling of analog / digital counters in configuration
* Improved Alignment Algorithm (`AlignmentAlgo` = `Default`, `Accurate` , `Fast`)
* Analog counters: `ExtendedResolution` (last digit is extended by sub comma value of CNN)
* `config.ini`: additional parameter `hostname` (additional to wlan.ini)
* Switching of GPIO12/13 via http-interface: `/GPIO?GPIO=12&Status=high/low`
* Bug fixing: html configuration page, wlan password ("=" now possible)
##### 7.1.2 MQTT-Update - (2021-06-17)
##### 6.0.0 Image Processing in Memory - (2021-01-02)
**7.0.1 MQTT-Update - (2021-05-13)**
* **Major change**: image processing fully in memory - no need of SD card buffer anymore
* Need to limit camera resolution to VGA (due to memory limits)
* MQTT: Last Will Testament (LWT) implemented: "connection lost" in case of connection lost to `TopicError`
* Disabled `CheckDigitIncreaseConsistency` in default configuration - must now be explicit enabled if needed
* Update digital CNN to v7.2.1 (additional digital images trained)
* Setting of arbitrary time server in `config.ini`
* Option for fixed IP-, DNS-Settings in `wlan.ini`
* Increased stability (internal image and camera handling)
* Bug fixing: edit digits, handling PreValue, html-bugs
##### 6.7.2 Image Processing in Memory - (2021-05-01)
##### 5.0.0 Setup Modus - (2020-12-06)
* Implementation of initial setup modus for fresh installation
* Code restructuring (full compatibility between pure ESP-IDF and Platformio w/ espressif)
##### 4.1.1 Configuration editor - (2020-12-02)
* Bug fixing: internal improvement of file handling (reduce not responding)
##### 4.1.0 Configuration editor - (2020-11-30)
* Implementation of configuration editor (including basic and expert mode)
* Adjustable time zone to adjust to local time setting (incl. daylight saving time)
* MQTT: additional topic for error reporting
* standardized access to current logfile via `http://IP-ADRESS/logfileact`
* Update digital CNN to v7.2.0, analog CNN to 6.3.0
* Bug fixing: truncation error, CheckDigitConsistency & PreValue implementation
##### 4.0.0 Tflite Core - (2020-11-15)
* Implementation of rolling log-files
* Update Tflite-Core to master@20201108 (v2.4)
* Bug-fixing for reducing reboots
##### 3.1.0 MQTT-Client - (2020-10-26)
* Update digital CNN to v6.5.0 and HTML (Info to hostname, IP, ssid)
* New implementation of "checkDigitConsistency" also for digits
* MQTT-Adapter: user and password for sign in MQTT-Broker
##### 3.0.0 MQTT-Client (2020-10-14)
* Implementation of MQTT Client
* Improved Version Control
* bug-fixing
##### 2.2.1 Version Control - (2020-09-27)
##### 2.2.1 Version Control (2020-09-27)
* Bug-Fixing (hostname in wlan.ini and error handling inside flow)
##### 2.1.0 Decimal Shift, Chrome & Edge - (2020-09-25)
##### 2.2.0 Version Control (2020-09-27)
##### 2.0.0 Layout update - (2020-09-12)
* Integrated automated versioning system (menu: SYSTEM --> INFO)
* Update Build-System to PlatformIO - Espressif 32 v2.0.0 (ESP-IDF 4.1)
##### 2.1.0 Decimal Shift, Chrome & Edge (2020-09-25)
* Implementation of Decimal Shift
* Update default CNN for digits to v6.4.0
* Improvement HTML
* Support for Chrome and Edge
* Reduce logging to minimum - extended logging on demand
* Implementation of hostname in wlan.ini (`hostname = "HOSTNAME")`
* Bug fixing, code corrections
##### 2.0.0 Layout update (2020-09-12)
* Update to **new and modern layout**
* Support for Chrome improved
* Improved robustness: improved error handling in auto flow reduces spontaneous reboots
* File server: Option for "DELETE ALL"
* WLan: support of spaces in SSID and password
* Reference Image: Option for mirror image, option for image update on the fly
* additional parameter in `wasserzaehler.html?noerror=true` to suppress an potential error message
* bug fixing
##### 1.1.3 (2020-09-09)
* **Bug in configuration of analog ROIs corrected** - correction in v.1.0.2 did not work properly
* Improved update page for the web server (`/html` can be updated via a zip-file, which is provided in `/firmware/html.zip`)
* Improved Chrome support
##### 1.1.0 (2020-09-06)
* Implementation of "delete complete directory"
**Attention: beside the `firmware.bin`, also the content of `/html` needs to be updated!**
##### 1.0.2 (2020-09-06)
* Bug in configuration of analog ROIs corrected
* minor bug correction
##### 1.0.1 (2020-09-05)
* preValue.ini Bug corrected
* minor bug correction
##### 1.0.0 (2020-09-04)
* **First usable version** - compatible to previous project (https://github.com/jomjol/water-meter-system-complete)
* NEW:
* no docker container for CNN calculation necessary
* web based configuration editor on board
##### 0.1.0 (2020-08-07)
* Initial Version
##### 1.1.3 Initial Version - (2020-09-09)
#### [Full Changelog](Changelog.md)
## Solved topics
* n.a.


@@ -1,3 +1,3 @@
copy "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\code\.pio\build\esp32cam\firmware.bin" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\firmware.bin"
copy "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\code\.pio\build\esp32cam\bootloader.bin" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\bootloader.bin"
copy "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\code\.pio\build\esp32cam\partitions.bin" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\partitions.bin"
copy "..\..\code\.pio\build\esp32cam\firmware.bin" "..\..\firmware\firmware.bin"
copy "..\..\code\.pio\build\esp32cam\bootloader.bin" "..\..\firmware\bootloader.bin"
copy "..\..\code\.pio\build\esp32cam\partitions.bin" "..\..\firmware\partitions.bin"


@@ -1 +1 @@
powershell Compress-Archive "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\sd-card\html\*.*" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\html.zip"
powershell Compress-Archive "..\..\sd-card\html\*.*" "..\..\firmware\html.zip"


@@ -1,539 +0,0 @@
#include "connect_wlan.h"
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"
#include "esp_wifi.h"
#include "esp_log.h"
#include <fstream>
#include <vector>
#include <sstream>
#include "Helper.h"
static const char *TAG = "connect_wlan";
std::string ssid = "";
std::string passphrase = "";
std::string hostname = "";
std::string ipaddress = "";
std::string gw = "";
std::string netmask = "";
std::string dns = "";
std::string std_hostname = "watermeter";
#define BLINK_GPIO GPIO_NUM_33
static EventGroupHandle_t s_wifi_event_group;
#define WIFI_CONNECTED_BIT BIT0
#define WIFI_FAIL_BIT BIT1
static int s_retry_num = 0;
std::vector<string> ZerlegeZeile(std::string input, std::string _delimiter = "")
{
std::vector<string> Output;
std::string delimiter = " =,";
if (_delimiter.length() > 0){
delimiter = _delimiter;
}
input = trim(input, delimiter);
size_t pos = findDelimiterPos(input, delimiter);
std::string token;
while (pos != std::string::npos) {
token = input.substr(0, pos);
token = trim(token, delimiter);
Output.push_back(token);
input.erase(0, pos + 1);
input = trim(input, delimiter);
pos = findDelimiterPos(input, delimiter);
}
Output.push_back(input);
return Output;
}
void blinkstatus(int dauer, int _anzahl)
{
gpio_reset_pin(BLINK_GPIO);
gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT);
for (int i = 0; i < _anzahl; ++i)
{
gpio_set_level(BLINK_GPIO, 0);
vTaskDelay(dauer / portTICK_PERIOD_MS);
gpio_set_level(BLINK_GPIO, 1);
vTaskDelay(dauer / portTICK_PERIOD_MS);
}
}
void strinttoip4(std::string ip, int &a, int &b, int &c, int &d) {
std::stringstream s(ip);
char ch; //to temporarily store the '.'
s >> a >> ch >> b >> ch >> c >> ch >> d;
}
static void event_handler_neu(void* arg, esp_event_base_t event_base,
int32_t event_id, void* event_data)
{
if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_START) {
blinkstatus(200, 1);
esp_wifi_connect();
} else if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_DISCONNECTED) {
blinkstatus(200, 5);
esp_wifi_connect();
s_retry_num++;
ESP_LOGI(TAG, "retry to connect to the AP");
} else if (event_base == IP_EVENT && event_id == IP_EVENT_STA_GOT_IP) {
blinkstatus(1000, 3);
ip_event_got_ip_t* event = (ip_event_got_ip_t*) event_data;
ESP_LOGI(TAG, "got ip:" IPSTR, IP2STR(&event->ip_info.ip));
s_retry_num = 0;
xEventGroupSetBits(s_wifi_event_group, WIFI_CONNECTED_BIT);
}
}
void initialise_wifi()
{
s_wifi_event_group = xEventGroupCreate();
ESP_ERROR_CHECK(esp_netif_init());
ESP_ERROR_CHECK(esp_event_loop_create_default());
esp_netif_create_default_wifi_sta();
wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
ESP_ERROR_CHECK(esp_wifi_init(&cfg));
esp_event_handler_instance_t instance_any_id;
esp_event_handler_instance_t instance_got_ip;
ESP_ERROR_CHECK(esp_event_handler_instance_register(WIFI_EVENT,
ESP_EVENT_ANY_ID,
&event_handler_neu,
NULL,
&instance_any_id));
ESP_ERROR_CHECK(esp_event_handler_instance_register(IP_EVENT,
IP_EVENT_STA_GOT_IP,
&event_handler_neu,
NULL,
&instance_got_ip));
wifi_config_t wifi_config = { };
strcpy((char*)wifi_config.sta.ssid, (const char*)ssid.c_str());
strcpy((char*)wifi_config.sta.password, (const char*)passphrase.c_str());
ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) );
ESP_ERROR_CHECK(esp_wifi_set_config(ESP_IF_WIFI_STA, &wifi_config) );
ESP_ERROR_CHECK(esp_wifi_start() );
ESP_LOGI(TAG, "wifi_init_sta finished.");
// Waiting until either the connection is established (WIFI_CONNECTED_BIT) or connection failed for the maximum
// number of re-tries (WIFI_FAIL_BIT). The bits are set by event_handler() (see above)
EventBits_t bits = xEventGroupWaitBits(s_wifi_event_group,
WIFI_CONNECTED_BIT | WIFI_FAIL_BIT,
pdFALSE,
pdFALSE,
portMAX_DELAY);
// xEventGroupWaitBits() returns the bits before the call returned, hence we can test which event actually
// happened.
if (bits & WIFI_CONNECTED_BIT) {
ESP_LOGI(TAG, "connected to ap SSID:%s password:%s",
ssid.c_str(), passphrase.c_str());
} else if (bits & WIFI_FAIL_BIT) {
ESP_LOGI(TAG, "Failed to connect to SSID:%s, password:%s",
ssid.c_str(), passphrase.c_str());
} else {
ESP_LOGE(TAG, "UNEXPECTED EVENT");
}
// The event will not be processed after unregister
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, instance_got_ip));
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, instance_any_id));
vEventGroupDelete(s_wifi_event_group);
tcpip_adapter_ip_info_t ip_info;
ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info));
ipaddress = std::string(ip4addr_ntoa(&ip_info.ip));
netmask = std::string(ip4addr_ntoa(&ip_info.netmask));
gw = std::string(ip4addr_ntoa(&ip_info.gw));
printf("IPv4 : %s\n", ip4addr_ntoa(&ip_info.ip));
printf("HostName : %s\n", hostname.c_str());
}
void initialise_wifi_fixed_ip2()
{
s_wifi_event_group = xEventGroupCreate();
ESP_ERROR_CHECK(esp_netif_init());
ESP_ERROR_CHECK(esp_event_loop_create_default());
esp_netif_t *my_sta = esp_netif_create_default_wifi_sta();
esp_netif_dhcpc_stop(my_sta);
esp_netif_ip_info_t ip_info;
int a, b, c, d;
strinttoip4(ipaddress, a, b, c, d);
IP4_ADDR(&ip_info.ip, a, b, c, d);
strinttoip4(gw, a, b, c, d);
IP4_ADDR(&ip_info.gw, a, b, c, d);
strinttoip4(netmask, a, b, c, d);
IP4_ADDR(&ip_info.netmask, a, b, c, d);
esp_netif_set_ip_info(my_sta, &ip_info);
wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
ESP_ERROR_CHECK(esp_wifi_init(&cfg));
if (dns.length() > 0) {
esp_netif_dns_info_t dns_info;
ip4_addr_t ip;
ip.addr = esp_ip4addr_aton(dns.c_str());
ip_addr_set_ip4_u32(&dns_info.ip, ip.addr);
ESP_ERROR_CHECK(esp_netif_set_dns_info(my_sta, ESP_NETIF_DNS_MAIN, &dns_info));
}
esp_event_handler_instance_t instance_any_id;
esp_event_handler_instance_t instance_got_ip;
ESP_ERROR_CHECK(esp_event_handler_instance_register(WIFI_EVENT,
ESP_EVENT_ANY_ID,
&event_handler_neu,
NULL,
&instance_any_id));
ESP_ERROR_CHECK(esp_event_handler_instance_register(IP_EVENT,
IP_EVENT_STA_GOT_IP,
&event_handler_neu,
NULL,
&instance_got_ip));
wifi_config_t wifi_config = { };
strcpy((char*)wifi_config.sta.ssid, (const char*)ssid.c_str());
strcpy((char*)wifi_config.sta.password, (const char*)passphrase.c_str());
ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) );
ESP_ERROR_CHECK(esp_wifi_set_config(ESP_IF_WIFI_STA, &wifi_config) );
ESP_ERROR_CHECK(esp_wifi_start() );
ESP_LOGI(TAG, "wifi_init_sta finished.");
// Waiting until either the connection is established (WIFI_CONNECTED_BIT) or connection failed for the maximum
// number of re-tries (WIFI_FAIL_BIT). The bits are set by event_handler() (see above)
EventBits_t bits = xEventGroupWaitBits(s_wifi_event_group,
WIFI_CONNECTED_BIT | WIFI_FAIL_BIT,
pdFALSE,
pdFALSE,
portMAX_DELAY);
// xEventGroupWaitBits() returns the bits before the call returned, hence we can test which event actually
// happened.
if (bits & WIFI_CONNECTED_BIT) {
ESP_LOGI(TAG, "connected to ap SSID:%s password:%s",
ssid.c_str(), passphrase.c_str());
} else if (bits & WIFI_FAIL_BIT) {
ESP_LOGI(TAG, "Failed to connect to SSID:%s, password:%s",
ssid.c_str(), passphrase.c_str());
} else {
ESP_LOGE(TAG, "UNEXPECTED EVENT");
}
// The event will not be processed after unregister
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, instance_got_ip));
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, instance_any_id));
vEventGroupDelete(s_wifi_event_group);
tcpip_adapter_ip_info_t ip_info2;
ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info2));
ipaddress = std::string(ip4addr_ntoa(&ip_info2.ip));
netmask = std::string(ip4addr_ntoa(&ip_info2.netmask));
gw = std::string(ip4addr_ntoa(&ip_info2.gw));
}
void ConnectToWLAN()
{
if (ipaddress.length() == 0 || gw.length() == 0 || netmask.length() == 0)
{
printf("Connect to WLAN with dyn. IP\n");
initialise_wifi();
}
else
{
printf("Connect to WLAN with fixed IP\n");
initialise_wifi_fixed_ip2();
}
}
bool ChangeHostName(std::string fn, std::string _newhostname)
{
if (_newhostname == hostname)
return false;
string line = "";
std::vector<string> zerlegt;
bool found = false;
std::vector<string> neuesfile;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return false;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
line = "hostname = \"" + _newhostname + "\"\n";
found = true;
}
neuesfile.push_back(line);
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
if (!found)
{
line = "hostname = \"" + _newhostname + "\"\n";
neuesfile.push_back(line);
}
fclose(pFile);
pFile = OpenFileAndWait(fn.c_str(), "w+");
for (int i = 0; i < neuesfile.size(); ++i)
{
fputs(neuesfile[i].c_str(), pFile);
}
fclose(pFile);
return true;
}
void LoadWlanFromFile(std::string fn)
{
string line = "";
std::vector<string> zerlegt;
hostname = std_hostname;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
for (int i = 2; i < zerlegt.size(); ++i)
zerlegt[i] = zerlegt[i-1] + zerlegt[i];
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
hostname = trim(zerlegt[1]);
if ((hostname[0] == '"') && (hostname[hostname.length()-1] == '"')){
hostname = hostname.substr(1, hostname.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){
ssid = trim(zerlegt[1]);
if ((ssid[0] == '"') && (ssid[ssid.length()-1] == '"')){
ssid = ssid.substr(1, ssid.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "PASSWORD")){
passphrase = zerlegt[1];
if ((passphrase[0] == '"') && (passphrase[passphrase.length()-1] == '"')){
passphrase = passphrase.substr(1, passphrase.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
ipaddress = zerlegt[1];
if ((ipaddress[0] == '"') && (ipaddress[ipaddress.length()-1] == '"')){
ipaddress = ipaddress.substr(1, ipaddress.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
gw = zerlegt[1];
if ((gw[0] == '"') && (gw[gw.length()-1] == '"')){
gw = gw.substr(1, gw.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
netmask = zerlegt[1];
if ((netmask[0] == '"') && (netmask[netmask.length()-1] == '"')){
netmask = netmask.substr(1, netmask.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
dns = zerlegt[1];
if ((dns[0] == '"') && (dns[dns.length()-1] == '"')){
dns = dns.substr(1, dns.length()-2);
}
}
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
fclose(pFile);
// Check if Hostname was empty in .ini if yes set to std_hostname
if(hostname.length() <= 0){
hostname = std_hostname;
}
printf("\nWLan: %s, %s\n", ssid.c_str(), passphrase.c_str());
printf("Hostename: %s\n", hostname.c_str());
printf("Fixed IP: %s, Gateway %s, Netmask %s, DNS %s\n", ipaddress.c_str(), gw.c_str(), netmask.c_str(), dns.c_str());
}
void LoadNetConfigFromFile(std::string _fn, std::string &_ip, std::string &_gw, std::string &_netmask, std::string &_dns)
{
string line = "";
std::vector<string> zerlegt;
FILE* pFile;
_fn = FormatFileName(_fn);
pFile = OpenFileAndWait(_fn.c_str(), "r");
if (pFile == NULL)
return;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
_ip = zerlegt[1];
if ((_ip[0] == '"') && (_ip[_ip.length()-1] == '"')){
_ip = _ip.substr(1, _ip.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
_gw = zerlegt[1];
if ((_gw[0] == '"') && (_gw[_gw.length()-1] == '"')){
_gw = _gw.substr(1, _gw.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
_netmask = zerlegt[1];
if ((_netmask[0] == '"') && (_netmask[_netmask.length()-1] == '"')){
_netmask = _netmask.substr(1, _netmask.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
_dns = zerlegt[1];
if ((_dns[0] == '"') && (_dns[_dns.length()-1] == '"')){
_dns = _dns.substr(1, _dns.length()-2);
}
}
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
fclose(pFile);
}
std::string getHostname(){
return hostname;
}
std::string getIPAddress(){
return ipaddress;
}
std::string getSSID(){
return ssid;
}
std::string getNetMask(){
return netmask;
}
std::string getGW(){
return gw;
}


@@ -1,21 +0,0 @@
//#ifndef CONNECT_WLAN_H
//#define CONNECT_WLAN_H
#include <string>
#include "driver/gpio.h"
const int CONNECTED_BIT = BIT0;
void ConnectToWLAN();
void LoadWlanFromFile(std::string fn);
bool ChangeHostName(std::string fn, std::string _newhostname);
std::string getHostname();
std::string getIPAddress();
std::string getSSID();
std::string getNetMask();
std::string getGW();
//#endif


@@ -1,489 +0,0 @@
#include "connect_wlan.h"
#include <string.h>
#include "esp_wifi.h"
#include "esp_event_loop.h"
#include "freertos/event_groups.h"
#include "esp_log.h"
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <iostream>
#include <arpa/inet.h>
#include "Helper.h"
static const char *MAIN_TAG = "connect_wlan";
std::string ssid = "";
std::string passphrase = "";
std::string hostname = "";
std::string ipaddress = "";
std::string gw = "";
std::string netmask = "";
std::string dns = "";
std::string std_hostname = "watermeter";
static EventGroupHandle_t wifi_event_group;
#define BLINK_GPIO GPIO_NUM_33
std::vector<string> ZerlegeZeile(std::string input, std::string _delimiter = "")
{
std::vector<string> Output;
std::string delimiter = " =,";
if (_delimiter.length() > 0){
delimiter = _delimiter;
}
input = trim(input, delimiter);
size_t pos = findDelimiterPos(input, delimiter);
std::string token;
while (pos != std::string::npos) {
token = input.substr(0, pos);
token = trim(token, delimiter);
Output.push_back(token);
input.erase(0, pos + 1);
input = trim(input, delimiter);
pos = findDelimiterPos(input, delimiter);
}
Output.push_back(input);
return Output;
}
void wifi_connect(){
wifi_config_t cfg = { };
strcpy((char*)cfg.sta.ssid, (const char*)ssid.c_str());
strcpy((char*)cfg.sta.password, (const char*)passphrase.c_str());
ESP_ERROR_CHECK( esp_wifi_disconnect() );
ESP_ERROR_CHECK( esp_wifi_set_config(ESP_IF_WIFI_STA, &cfg) );
ESP_ERROR_CHECK( esp_wifi_connect() );
}
void blinkstatus(int dauer, int _anzahl)
{
gpio_reset_pin(BLINK_GPIO);
gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT);
for (int i = 0; i < _anzahl; ++i)
{
gpio_set_level(BLINK_GPIO, 0);
vTaskDelay(dauer / portTICK_PERIOD_MS);
gpio_set_level(BLINK_GPIO, 1);
vTaskDelay(dauer / portTICK_PERIOD_MS);
}
}
static esp_err_t event_handler(void *ctx, system_event_t *event)
{
switch(event->event_id) {
case SYSTEM_EVENT_STA_START:
blinkstatus(200, 1);
wifi_connect();
break;
case SYSTEM_EVENT_STA_GOT_IP:
xEventGroupSetBits(wifi_event_group, CONNECTED_BIT);
blinkstatus(1000, 3);
break;
case SYSTEM_EVENT_STA_DISCONNECTED:
blinkstatus(200, 5);
esp_wifi_connect();
xEventGroupClearBits(wifi_event_group, CONNECTED_BIT);
break;
default:
break;
}
return ESP_OK;
}
void initialise_wifi()
{
ESP_ERROR_CHECK(esp_event_loop_init(event_handler, NULL) );
wifi_event_group = xEventGroupCreate();
esp_log_level_set("wifi", ESP_LOG_NONE); // disable wifi driver logging
tcpip_adapter_init();
wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
ESP_ERROR_CHECK( esp_wifi_init(&cfg) );
ESP_ERROR_CHECK( esp_wifi_set_mode(WIFI_MODE_STA) );
ESP_ERROR_CHECK( esp_wifi_start() );
esp_err_t ret = tcpip_adapter_set_hostname(TCPIP_ADAPTER_IF_STA , hostname.c_str());
if(ret != ESP_OK ){
ESP_LOGE(MAIN_TAG,"failed to set hostname:%d",ret);
}
xEventGroupWaitBits(wifi_event_group,CONNECTED_BIT,true,true,portMAX_DELAY);
tcpip_adapter_ip_info_t ip_info;
ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info));
ipaddress = std::string(ip4addr_ntoa(&ip_info.ip));
netmask = std::string(ip4addr_ntoa(&ip_info.netmask));
gw = std::string(ip4addr_ntoa(&ip_info.gw));
printf("IPv4 : %s\n", ip4addr_ntoa(&ip_info.ip));
printf("HostName : %s\n", hostname.c_str());
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
void strinttoip4(std::string ip, int &a, int &b, int &c, int &d) {
std::stringstream s(ip);
char ch; //to temporarily store the '.'
s >> a >> ch >> b >> ch >> c >> ch >> d;
}
void initialise_wifi_fixed_ip()
{
wifi_event_group = xEventGroupCreate();
ESP_ERROR_CHECK(esp_netif_init());
ESP_ERROR_CHECK(esp_event_loop_create_default());
esp_netif_t *my_sta = esp_netif_create_default_wifi_sta();
esp_netif_dhcpc_stop(my_sta);
esp_netif_ip_info_t ip_info;
int a, b, c, d;
strinttoip4(ipaddress, a, b, c, d);
IP4_ADDR(&ip_info.ip, a, b, c, d);
strinttoip4(gw, a, b, c, d);
IP4_ADDR(&ip_info.gw, a, b, c, d);
strinttoip4(netmask, a, b, c, d);
IP4_ADDR(&ip_info.netmask, a, b, c, d);
esp_netif_set_ip_info(my_sta, &ip_info);
wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
ESP_ERROR_CHECK(esp_wifi_init(&cfg));
if (dns.length() > 0) {
esp_netif_dns_info_t dns_info;
ip4_addr_t ip;
ip.addr = esp_ip4addr_aton(dns.c_str());
ip_addr_set_ip4_u32(&dns_info.ip, ip.addr);
ESP_ERROR_CHECK(esp_netif_set_dns_info(my_sta, ESP_NETIF_DNS_MAIN, &dns_info));
}
ESP_ERROR_CHECK(esp_event_loop_init(event_handler, NULL) );
wifi_config_t wifi_config = { };
strcpy((char*)wifi_config.sta.ssid, (const char*)ssid.c_str());
strcpy((char*)wifi_config.sta.password, (const char*)passphrase.c_str());
ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) );
ESP_ERROR_CHECK(esp_wifi_set_config(ESP_IF_WIFI_STA, &wifi_config) );
ESP_ERROR_CHECK(esp_wifi_start() );
ESP_LOGI(MAIN_TAG, "wifi_init_sta finished.");
EventBits_t bits = xEventGroupWaitBits(wifi_event_group,CONNECTED_BIT,true,true,portMAX_DELAY);
if (bits & CONNECTED_BIT) {
ESP_LOGI(MAIN_TAG, "connected to ap SSID:%s password:%s",
ssid.c_str(), passphrase.c_str());
} else {
ESP_LOGI(MAIN_TAG, "Failed to connect to SSID:%s, password:%s",
ssid.c_str(), passphrase.c_str());
}
tcpip_adapter_ip_info_t ip_info2;
ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info2));
ipaddress = std::string(ip4addr_ntoa(&ip_info2.ip));
netmask = std::string(ip4addr_ntoa(&ip_info2.netmask));
gw = std::string(ip4addr_ntoa(&ip_info2.gw));
// vEventGroupDelete(wifi_event_group);
}
void ConnectToWLAN()
{
if (ipaddress.length() == 0 || gw.length() == 0 || netmask.length() == 0)
{
printf("Connect to WLAN with dyn. IP\n");
initialise_wifi();
}
else
{
printf("Connect to WLAN with fixed IP\n");
initialise_wifi_fixed_ip();
}
}
bool ChangeHostName(std::string fn, std::string _newhostname)
{
if (_newhostname == hostname)
return false;
string line = "";
std::vector<string> zerlegt;
bool found = false;
std::vector<string> neuesfile;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return false;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
line = "hostname = \"" + _newhostname + "\"\n";
found = true;
}
neuesfile.push_back(line);
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
if (!found)
{
line = "hostname = \"" + _newhostname + "\"\n";
neuesfile.push_back(line);
}
fclose(pFile);
pFile = OpenFileAndWait(fn.c_str(), "w+");
for (int i = 0; i < neuesfile.size(); ++i)
{
fputs(neuesfile[i].c_str(), pFile);
}
fclose(pFile);
return true;
}
void LoadWlanFromFile(std::string fn)
{
string line = "";
std::vector<string> zerlegt;
hostname = std_hostname;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
for (int i = 2; i < zerlegt.size(); ++i)
zerlegt[i] = zerlegt[i-1] + zerlegt[i];
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
hostname = trim(zerlegt[1]);
if ((hostname[0] == '"') && (hostname[hostname.length()-1] == '"')){
hostname = hostname.substr(1, hostname.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){
ssid = trim(zerlegt[1]);
if ((ssid[0] == '"') && (ssid[ssid.length()-1] == '"')){
ssid = ssid.substr(1, ssid.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "PASSWORD")){
passphrase = zerlegt[1];
if ((passphrase[0] == '"') && (passphrase[passphrase.length()-1] == '"')){
passphrase = passphrase.substr(1, passphrase.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
ipaddress = zerlegt[1];
if ((ipaddress[0] == '"') && (ipaddress[ipaddress.length()-1] == '"')){
ipaddress = ipaddress.substr(1, ipaddress.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
gw = zerlegt[1];
if ((gw[0] == '"') && (gw[gw.length()-1] == '"')){
gw = gw.substr(1, gw.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
netmask = zerlegt[1];
if ((netmask[0] == '"') && (netmask[netmask.length()-1] == '"')){
netmask = netmask.substr(1, netmask.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
dns = zerlegt[1];
if ((dns[0] == '"') && (dns[dns.length()-1] == '"')){
dns = dns.substr(1, dns.length()-2);
}
}
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
fclose(pFile);
// Check if Hostname was empty in .ini if yes set to std_hostname
if(hostname.length() <= 0){
hostname = std_hostname;
}
printf("\nWLan: %s, %s\n", ssid.c_str(), passphrase.c_str());
printf("Hostename: %s\n", hostname.c_str());
printf("Fixed IP: %s, Gateway %s, Netmask %s, DNS %s\n", ipaddress.c_str(), gw.c_str(), netmask.c_str(), dns.c_str());
}
void LoadNetConfigFromFile(std::string fn, std::string &_ip, std::string &_gw, std::string &_netmask, std::string &_dns)
{
string line = "";
std::vector<string> zerlegt;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
_ip = zerlegt[1];
if ((_ip[0] == '"') && (_ip[_ip.length()-1] == '"')){
_ip = _ip.substr(1, _ip.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
_gw = zerlegt[1];
if ((_gw[0] == '"') && (_gw[_gw.length()-1] == '"')){
_gw = _gw.substr(1, _gw.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
_netmask = zerlegt[1];
if ((_netmask[0] == '"') && (_netmask[_netmask.length()-1] == '"')){
_netmask = _netmask.substr(1, _netmask.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
_dns = zerlegt[1];
if ((_dns[0] == '"') && (_dns[_dns.length()-1] == '"')){
_dns = _dns.substr(1, _dns.length()-2);
}
}
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
fclose(pFile);
}
std::string getHostname(){
return hostname;
}
std::string getIPAddress(){
return ipaddress;
}
std::string getSSID(){
return ssid;
}
std::string getNetMask(){
return netmask;
}
std::string getGW(){
return gw;
}


@@ -1,20 +0,0 @@
#ifndef CONNECT_WLAN_H
#define CONNECT_WLAN_H
#include <string>
#include "driver/gpio.h"
const int CONNECTED_BIT = BIT0;
void ConnectToWLAN();
void LoadWlanFromFile(std::string fn);
bool ChangeHostName(std::string fn, std::string _newhostname);
std::string getHostname();
std::string getIPAddress();
std::string getSSID();
std::string getNetMask();
std::string getGW();
#endif


@@ -62,7 +62,8 @@ bool frame2jpg_cb(camera_fb_t * fb, uint8_t quality, jpg_out_cb cb, void * arg);
* @param height Height in pixels of the source image
* @param format Format of the source image
* @param quality JPEG quality of the resulting image
* @param out Pointer to be populated with the address of the resulting buffer
* @param out Pointer to be populated with the address of the resulting buffer.
* You MUST free the pointer once you are done with it.
* @param out_len Pointer to be populated with the length of the output buffer
*
* @return true on success


@@ -317,7 +317,7 @@ bool fmt2bmp(uint8_t *src, size_t src_len, uint16_t width, uint16_t height, pixf
}
*out = out_buf;
*out_len = out_size;
return true;
return true;
}
bool frame2bmp(camera_fb_t * fb, uint8_t ** out, size_t * out_len)


@@ -1321,7 +1321,7 @@ esp_err_t camera_init(const camera_config_t* config)
}
vsync_intr_disable();
err = gpio_install_isr_service(ESP_INTR_FLAG_LEVEL1 | ESP_INTR_FLAG_IRAM);
err = gpio_install_isr_service(ESP_INTR_FLAG_LOWMED | ESP_INTR_FLAG_IRAM);
if (err != ESP_OK) {
if (err != ESP_ERR_INVALID_STATE) {
ESP_LOGE(TAG, "gpio_install_isr_service failed (%x)", err);


@@ -29,7 +29,7 @@
// ================================ CODE ======================================
#include <esp_event_loop.h>
#include <esp_event.h>
#include <esp_log.h>
#include <esp_system.h>
#include <nvs_flash.h>


@@ -1,5 +1,3 @@
name: "esp32-camera"
version: "1.0.0"
description: This package hosts ESP32 compatible driver for OV2640 image sensors. Additionally it provides a few tools, which allow converting the captured frame data to the more common BMP and JPEG formats.
url: https://github.com/espressif/esp32-camera


@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES jomjol_helper)
REQUIRES jomjol_logfile)


@@ -0,0 +1,105 @@
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "Helper.h"
#include "configFile.h"
//static const char *TAGCONFIGFILE = "configFile";
ConfigFile::ConfigFile(std::string filePath)
{
std::string config = FormatFileName(filePath);
pFile = OpenFileAndWait(config.c_str(), "r");
}
ConfigFile::~ConfigFile()
{
fclose(pFile);
}
bool ConfigFile::isNewParagraph(std::string input)
{
if ((input[0] == '[') || ((input[0] == ';') && (input[1] == '[')))
{
return true;
}
return false;
}
bool ConfigFile::GetNextParagraph(std::string& aktparamgraph, bool &disabled, bool &eof)
{
while (getNextLine(&aktparamgraph, disabled, eof) && !isNewParagraph(aktparamgraph));
if (isNewParagraph(aktparamgraph))
return true;
return false;
}
bool ConfigFile::getNextLine(std::string *rt, bool &disabled, bool &eof)
{
eof = false;
char zw[1024] = "";
if (pFile == NULL)
{
*rt = "";
return false;
}
if (fgets(zw, 1024, pFile))
{
printf("%s", zw);
if ((strlen(zw) == 0) && feof(pFile))
{
*rt = "";
eof = true;
return false;
}
}
else
{
*rt = "";
eof = true;
return false;
}
*rt = zw;
*rt = trim(*rt);
while ((zw[0] == ';' || zw[0] == '#' || (rt->size() == 0)) && !(zw[1] == '[')) // skip comment lines (; or #) and empty lines, unless it is a new commented-out paragraph
{
fgets(zw, 1024, pFile);
printf("%s", zw);
if (feof(pFile))
{
*rt = "";
eof = true;
return false;
}
*rt = zw;
*rt = trim(*rt);
}
disabled = ((*rt)[0] == ';');
return true;
}
std::vector<string> ConfigFile::ZerlegeZeile(std::string input, std::string delimiter)
{
std::vector<string> Output;
// std::string delimiter = " =,";
input = trim(input, delimiter);
size_t pos = findDelimiterPos(input, delimiter);
std::string token;
while (pos != std::string::npos) {
token = input.substr(0, pos);
token = trim(token, delimiter);
Output.push_back(token);
input.erase(0, pos + 1);
input = trim(input, delimiter);
pos = findDelimiterPos(input, delimiter);
}
Output.push_back(input);
return Output;
}
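ConfigFile walks an ini-style file paragraph by paragraph: getNextLine() skips comment and blank lines, GetNextParagraph() stops at the next [Section] header, and a leading ';' on a header marks the paragraph as disabled. A minimal usage sketch follows; the config path, the [GPIO] section name and the helper function are assumptions for illustration only.
// Sketch: seek the [GPIO] paragraph and list the keys of its lines.
void dumpGpioParagraph()
{
    ConfigFile cfg("/sdcard/config/config.ini");   // path is an assumption
    std::string line;
    bool disabled = false;
    bool eof = false;

    // Skip paragraphs until [GPIO] (or the end of the file) is reached.
    while (cfg.GetNextParagraph(line, disabled, eof) && (line != "[GPIO]") && !eof)
    {
        // keep searching
    }
    if (eof || (line != "[GPIO]"))
        return;

    // Read the key/value lines that belong to this paragraph.
    while (cfg.getNextLine(&line, disabled, eof) && !cfg.isNewParagraph(line))
    {
        std::vector<std::string> parts = cfg.ZerlegeZeile(line);
        printf("key: %s, tokens: %d\n", parts[0].c_str(), (int)parts.size());
    }
}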


@@ -0,0 +1,16 @@
#include <string>
#include <vector>
class ConfigFile {
public:
ConfigFile(std::string filePath);
~ConfigFile();
bool isNewParagraph(std::string input);
bool GetNextParagraph(std::string& aktparamgraph, bool &disabled, bool &eof);
bool getNextLine(std::string* rt, bool &disabled, bool &eof);
std::vector<std::string> ZerlegeZeile(std::string input, std::string delimiter = " =, \t");
private:
FILE* pFile;
};


@@ -3,7 +3,7 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
list(APPEND EXTRA_COMPONENT_DIRS $ENV{IDF_PATH}/examples/common_components/protocol_examples_common)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES esp_http_server jomjol_logfile)
INCLUDE_DIRS "." "../../include"
REQUIRES esp_http_server jomjol_logfile jomjol_configfile jomjol_mqtt jomjol_flowcontroll)


@@ -1,4 +1,5 @@
#include <string>
#include <functional>
#include "string.h"
#include <string.h>
@@ -6,22 +7,395 @@
#include "freertos/task.h"
#include "esp_system.h"
#include "esp_event.h"
#include "server_tflite.h"
//#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#include "esp_log.h"
#include "driver/gpio.h"
//#include "errno.h"
#include <sys/stat.h>
#include <vector>
//#include <regex>
#include "defines.h"
#include "server_GPIO.h"
#include "ClassLogFile.h"
#include "configFile.h"
#include "Helper.h"
#include "interface_mqtt.h"
// #define DEBUG_DETAIL_ON
static const char *TAG_SERVERGPIO = "server_GPIO";
QueueHandle_t gpio_queue_handle = NULL;
esp_err_t handler_switch_GPIO(httpd_req_t *req)
#define DEBUG_DETAIL_ON
GpioPin::GpioPin(gpio_num_t gpio, const char* name, gpio_pin_mode_t mode, gpio_int_type_t interruptType, uint8_t dutyResolution, std::string mqttTopic, bool httpEnable)
{
_gpio = gpio;
_name = name;
_mode = mode;
_interruptType = interruptType;
_mqttTopic = mqttTopic;
}
GpioPin::~GpioPin()
{
ESP_LOGD(TAG_SERVERGPIO,"reset GPIO pin %d", _gpio);
if (_interruptType != GPIO_INTR_DISABLE) {
//hook isr handler for specific gpio pin
gpio_isr_handler_remove(_gpio);
}
gpio_reset_pin(_gpio);
}
static void IRAM_ATTR gpio_isr_handler(void* arg)
{
GpioResult gpioResult;
gpioResult.gpio = *(gpio_num_t*) arg;
gpioResult.value = gpio_get_level(gpioResult.gpio);
BaseType_t ContextSwitchRequest = pdFALSE;
xQueueSendToBackFromISR(gpio_queue_handle,(void*)&gpioResult,&ContextSwitchRequest);
if(ContextSwitchRequest){
taskYIELD();
}
}
static void gpioHandlerTask(void *arg) {
ESP_LOGD(TAG_SERVERGPIO,"start interrupt task");
while(1){
if(uxQueueMessagesWaiting(gpio_queue_handle)){
while(uxQueueMessagesWaiting(gpio_queue_handle)){
GpioResult gpioResult;
xQueueReceive(gpio_queue_handle,(void*)&gpioResult,10);
ESP_LOGD(TAG_SERVERGPIO,"gpio: %d state: %d", gpioResult.gpio, gpioResult.value);
((GpioHandler*)arg)->gpioInterrupt(&gpioResult);
}
}
((GpioHandler*)arg)->taskHandler();
vTaskDelay(pdMS_TO_TICKS(1000));
}
}
void GpioPin::gpioInterrupt(int value) {
if (_mqttTopic != "") {
ESP_LOGD(TAG_SERVERGPIO, "gpioInterrupt %s %d", _mqttTopic.c_str(), value);
MQTTPublish(_mqttTopic, value ? "true" : "false");
currentState = value;
}
}
void GpioPin::init()
{
gpio_config_t io_conf;
//set interrupt
io_conf.intr_type = _interruptType;
//set as output mode
io_conf.mode = (_mode == GPIO_PIN_MODE_OUTPUT) || (_mode == GPIO_PIN_MODE_BUILT_IN_FLASH_LED) ? gpio_mode_t::GPIO_MODE_OUTPUT : gpio_mode_t::GPIO_MODE_INPUT;
//bit mask of the pins that you want to set, e.g. GPIO18/19
io_conf.pin_bit_mask = (1ULL << _gpio);
//set pull-down mode
io_conf.pull_down_en = _mode == GPIO_PIN_MODE_INPUT_PULLDOWN ? gpio_pulldown_t::GPIO_PULLDOWN_ENABLE : gpio_pulldown_t::GPIO_PULLDOWN_DISABLE;
//set pull-up mode
io_conf.pull_up_en = _mode == GPIO_PIN_MODE_INPUT_PULLDOWN ? gpio_pullup_t::GPIO_PULLUP_ENABLE : gpio_pullup_t::GPIO_PULLUP_DISABLE;
//configure GPIO with the given settings
gpio_config(&io_conf);
if (_interruptType != GPIO_INTR_DISABLE) {
//hook isr handler for specific gpio pin
ESP_LOGD(TAG_SERVERGPIO, "GpioPin::init add isr handler for GPIO %d\r\n", _gpio);
gpio_isr_handler_add(_gpio, gpio_isr_handler, (void*)&_gpio);
}
if ((_mqttTopic != "") && ((_mode == GPIO_PIN_MODE_OUTPUT) || (_mode == GPIO_PIN_MODE_OUTPUT_PWM) || (_mode == GPIO_PIN_MODE_BUILT_IN_FLASH_LED))) {
std::function<bool(std::string, char*, int)> f = std::bind(&GpioPin::handleMQTT, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
MQTTregisterSubscribeFunction(_mqttTopic, f);
}
}
bool GpioPin::getValue(std::string* errorText)
{
if ((_mode != GPIO_PIN_MODE_INPUT) && (_mode != GPIO_PIN_MODE_INPUT_PULLUP) && (_mode != GPIO_PIN_MODE_INPUT_PULLDOWN)) {
(*errorText) = "GPIO is not in input mode";
}
return gpio_get_level(_gpio) == 1;
}
void GpioPin::setValue(bool value, gpio_set_source setSource, std::string* errorText)
{
ESP_LOGD(TAG_SERVERGPIO, "GpioPin::setValue %d\r\n", value);
if ((_mode != GPIO_PIN_MODE_OUTPUT) && (_mode != GPIO_PIN_MODE_OUTPUT_PWM) && (_mode != GPIO_PIN_MODE_BUILT_IN_FLASH_LED)) {
(*errorText) = "GPIO is not in output mode";
} else {
gpio_set_level(_gpio, value);
if ((_mqttTopic != "") && (setSource != GPIO_SET_SOURCE_MQTT)) {
MQTTPublish(_mqttTopic, value ? "true" : "false");
}
}
}
void GpioPin::publishState() {
int newState = gpio_get_level(_gpio);
if (newState != currentState) {
ESP_LOGD(TAG_SERVERGPIO,"publish state of GPIO %d new state %d", _gpio, newState);
MQTTPublish(_mqttTopic, newState ? "true" : "false");
currentState = newState;
}
}
bool GpioPin::handleMQTT(std::string, char* data, int data_len) {
ESP_LOGD(TAG_SERVERGPIO, "GpioPin::handleMQTT data %.*s\r\n", data_len, data);
std::string dataStr(data, data_len);
dataStr = toLower(dataStr);
std::string errorText = "";
if ((dataStr == "true") || (dataStr == "1")) {
setValue(true, GPIO_SET_SOURCE_MQTT, &errorText);
} else if ((dataStr == "false") || (dataStr == "0")) {
setValue(false, GPIO_SET_SOURCE_MQTT, &errorText);
} else {
errorText = "wrong value ";
errorText.append(data, data_len);
}
if (errorText != "") {
ESP_LOGE(TAG_SERVERGPIO, "%s", errorText.c_str());
}
return (errorText == "");
}
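// Illustration (payload values taken from the checks above, topic layout from readConfig() below):
// publishing "true" or "1" to <main topic>/GPIO/<pin name> drives an output pin high via this
// callback, "false" or "0" drives it low; any other payload is rejected and logged as an error.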
esp_err_t callHandleHttpRequest(httpd_req_t *req)
{
ESP_LOGD(TAG_SERVERGPIO,"callHandleHttpRequest");
GpioHandler *gpioHandler = (GpioHandler*)req->user_ctx;
return gpioHandler->handleHttpRequest(req);
}
void taskGpioHandler(void *pvParameter)
{
ESP_LOGD(TAG_SERVERGPIO,"taskGpioHandler");
((GpioHandler*)pvParameter)->init();
}
GpioHandler::GpioHandler(std::string configFile, httpd_handle_t httpServer)
{
ESP_LOGI(TAG_SERVERGPIO,"start GpioHandler");
_configFile = configFile;
_httpServer = httpServer;
ESP_LOGI(TAG_SERVERGPIO, "register GPIO Uri");
registerGpioUri();
}
GpioHandler::~GpioHandler() {
if (gpioMap != NULL) {
clear();
delete gpioMap;
}
}
void GpioHandler::init()
{
// TickType_t xDelay = 60000 / portTICK_PERIOD_MS;
// printf("wait before start %ldms\r\n", (long) xDelay);
// vTaskDelay( xDelay );
if (gpioMap == NULL) {
gpioMap = new std::map<gpio_num_t, GpioPin*>();
} else {
clear();
}
ESP_LOGI(TAG_SERVERGPIO, "read GPIO config and init GPIO");
if (!readConfig()) {
clear();
delete gpioMap;
gpioMap = NULL;
ESP_LOGI(TAG_SERVERGPIO, "GPIO init comleted, handler is disabled");
return;
}
for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
it->second->init();
}
std::function<void()> f = std::bind(&GpioHandler::handleMQTTconnect, this);
MQTTregisterConnectFunction("gpio-handler", f);
if (xHandleTaskGpio == NULL) {
gpio_queue_handle = xQueueCreate(10,sizeof(GpioResult));
BaseType_t xReturned = xTaskCreate(&gpioHandlerTask, "gpio_int", configMINIMAL_STACK_SIZE * 8, (void *)this, tskIDLE_PRIORITY + 2, &xHandleTaskGpio);
if(xReturned == pdPASS ) {
ESP_LOGD(TAG_SERVERGPIO, "xHandletaskGpioHandler started");
} else {
ESP_LOGD(TAG_SERVERGPIO, "xHandletaskGpioHandler not started %d ", (int)xHandleTaskGpio);
}
}
ESP_LOGI(TAG_SERVERGPIO, "GPIO init comleted, is enabled");
}
void GpioHandler::taskHandler() {
if (gpioMap != NULL) {
for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
if ((it->second->getInterruptType() == GPIO_INTR_DISABLE))
it->second->publishState();
}
}
}
void GpioHandler::handleMQTTconnect()
{
if (gpioMap != NULL) {
for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
if ((it->second->getMode() == GPIO_PIN_MODE_INPUT) || (it->second->getMode() == GPIO_PIN_MODE_INPUT_PULLDOWN) || (it->second->getMode() == GPIO_PIN_MODE_INPUT_PULLUP))
it->second->publishState();
}
}
}
void GpioHandler::deinit() {
MQTTunregisterConnectFunction("gpio-handler");
clear();
if (xHandleTaskGpio != NULL) {
vTaskDelete(xHandleTaskGpio);
xHandleTaskGpio = NULL;
}
}
void GpioHandler::gpioInterrupt(GpioResult* gpioResult) {
if ((gpioMap != NULL) && (gpioMap->find(gpioResult->gpio) != gpioMap->end())) {
(*gpioMap)[gpioResult->gpio]->gpioInterrupt(gpioResult->value);
}
}
bool GpioHandler::readConfig()
{
if (!gpioMap->empty())
clear();
ConfigFile configFile = ConfigFile(_configFile);
std::vector<std::string> zerlegt;
std::string line = "";
bool disabledLine = false;
bool eof = false;
while ((!configFile.GetNextParagraph(line, disabledLine, eof) || (line.compare("[GPIO]") != 0)) && !disabledLine && !eof) {}
if (eof)
return false;
_isEnabled = !disabledLine;
if (!_isEnabled)
return false;
// std::string mainTopicMQTT = "";
std::string mainTopicMQTT = GetMQTTMainTopic();
if (mainTopicMQTT.length() > 0)
{
mainTopicMQTT = mainTopicMQTT + "/GPIO";
ESP_LOGD(TAG_SERVERGPIO, "MAINTOPICMQTT found\r\n");
}
bool registerISR = false;
while (configFile.getNextLine(&line, disabledLine, eof) && !configFile.isNewParagraph(line))
{
zerlegt = configFile.ZerlegeZeile(line);
// const std::regex pieces_regex("IO([0-9]{1,2})");
// std::smatch pieces_match;
// if (std::regex_match(zerlegt[0], pieces_match, pieces_regex) && (pieces_match.size() == 2))
// {
// std::string gpioStr = pieces_match[1];
ESP_LOGD(TAG_SERVERGPIO, "conf param %s\r\n", toUpper(zerlegt[0]).c_str());
if (toUpper(zerlegt[0]) == "MAINTOPICMQTT") {
// ESP_LOGD(TAG_SERVERGPIO, "MAINTOPICMQTT found\r\n");
// mainTopicMQTT = zerlegt[1];
} else if ((zerlegt[0].rfind("IO", 0) == 0) && (zerlegt.size() >= 6))
{
ESP_LOGI(TAG_SERVERGPIO,"Enable GP%s in %s mode", zerlegt[0].c_str(), zerlegt[1].c_str());
std::string gpioStr = zerlegt[0].substr(2, 2);
gpio_num_t gpioNr = (gpio_num_t)atoi(gpioStr.c_str());
gpio_pin_mode_t pinMode = resolvePinMode(toLower(zerlegt[1]));
gpio_int_type_t intType = resolveIntType(toLower(zerlegt[2]));
uint16_t dutyResolution = (uint8_t)atoi(zerlegt[3].c_str());
bool mqttEnabled = toLower(zerlegt[4]) == "true";
bool httpEnabled = toLower(zerlegt[5]) == "true";
char gpioName[100];
if (zerlegt.size() >= 7) {
strcpy(gpioName, trim(zerlegt[6]).c_str());
} else {
sprintf(gpioName, "GPIO%d", gpioNr);
}
std::string mqttTopic = mqttEnabled ? (mainTopicMQTT + "/" + gpioName) : "";
GpioPin* gpioPin = new GpioPin(gpioNr, gpioName, pinMode, intType,dutyResolution, mqttTopic, httpEnabled);
(*gpioMap)[gpioNr] = gpioPin;
if (intType != GPIO_INTR_DISABLE) {
registerISR = true;
}
}
}
if (registerISR) {
//install gpio isr service
gpio_install_isr_service(ESP_INTR_FLAG_LOWMED | ESP_INTR_FLAG_IRAM);
}
return true;
}
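// Illustration of the paragraph format readConfig() expects; the concrete values are assumed,
// not taken from this changeset. One "IO<pin>" line per configured pin:
//
//   [GPIO]
//   IO12 = output disabled 10 true true Relay1
//   IO13 = input-pullup rising-edge 10 true false DoorContact
//
// Column order: mode, interrupt type, duty resolution, MQTT enabled, HTTP enabled, optional name.
// Mode and interrupt strings are mapped by resolvePinMode() and resolveIntType() further down.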
void GpioHandler::clear()
{
ESP_LOGD(TAG_SERVERGPIO, "GpioHandler::clear\r\n");
if (gpioMap != NULL) {
for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
delete it->second;
}
gpioMap->clear();
}
// gpio_uninstall_isr_service(); can't uninstall, isr service is used by camera
}
void GpioHandler::registerGpioUri()
{
ESP_LOGI(TAG_SERVERGPIO, "server_GPIO - Registering URI handlers");
httpd_uri_t camuri = { };
camuri.method = HTTP_GET;
camuri.uri = "/GPIO";
camuri.handler = callHandleHttpRequest;
camuri.user_ctx = (void*)this;
httpd_register_uri_handler(_httpServer, &camuri);
}
esp_err_t GpioHandler::handleHttpRequest(httpd_req_t *req)
{
ESP_LOGD(TAG_SERVERGPIO, "handleHttpRequest");
if (gpioMap == NULL) {
std::string resp_str = "GPIO handler not initialized";
httpd_resp_send(req, resp_str.c_str(), resp_str.length());
return ESP_OK;
}
#ifdef DEBUG_DETAIL_ON
LogFile.WriteHeapInfo("handler_switch_GPIO - Start");
#endif
@@ -30,95 +404,183 @@ esp_err_t handler_switch_GPIO(httpd_req_t *req)
char _query[200];
char _valueGPIO[30];
char _valueStatus[30];
std::string gpio, status, zw;
int gpionum = 0;
gpio_num_t gpio_num;
std::string gpio, status;
if (httpd_req_get_url_query_str(req, _query, 200) == ESP_OK)
{
printf("Query: "); printf(_query); printf("\n");
if (httpd_req_get_url_query_str(req, _query, 200) == ESP_OK) {
ESP_LOGD(TAG_SERVERGPIO, "Query: %s", _query);
if (httpd_query_key_value(_query, "GPIO", _valueGPIO, 30) == ESP_OK)
{
printf("GPIO is found"); printf(_valueGPIO); printf("\n");
ESP_LOGD(TAG_SERVERGPIO, "GPIO is found %s", _valueGPIO);
gpio = std::string(_valueGPIO);
} else {
std::string resp_str = "GPIO No is not defined";
httpd_resp_send(req, resp_str.c_str(), resp_str.length());
return ESP_OK;
}
if (httpd_query_key_value(_query, "Status", _valueStatus, 30) == ESP_OK)
{
printf("Status is found"); printf(_valueStatus); printf("\n");
ESP_LOGD(TAG_SERVERGPIO, "Status is found %s", _valueStatus);
status = std::string(_valueStatus);
}
};
} else {
const char* resp_str = "Error in call. Use /GPIO?GPIO=12&Status=high";
httpd_resp_send(req, resp_str, strlen(resp_str));
return ESP_OK;
}
status = toUpper(status);
if (!(status == "HIGH") && !(status == "LOW"))
if ((status != "HIGH") && (status != "LOW") && (status != "TRUE") && (status != "FALSE") && (status != "0") && (status != "1") && (status != ""))
{
zw = "Status not valid: " + status;;
std::string zw = "Status not valid: " + status;
httpd_resp_sendstr_chunk(req, zw.c_str());
httpd_resp_sendstr_chunk(req, NULL);
return ESP_OK;
}
gpionum = stoi(gpio);
// free: 16; 12-15; 2; 4 // only 12 and 13 work; 2: reboot, 4: flash LED, 14/15: DMA for SD card ???
int gpionum = stoi(gpio);
switch (gpionum) {
case 12:
gpio_num = GPIO_NUM_12;
break;
case 13:
gpio_num = GPIO_NUM_13;
break;
default:
zw = "GPIO" + std::to_string(gpionum) + " not support - only 12 & 13 free";
// free: 16; 12-15; 2; 4 // only 12 and 13 work; 2: reboot, 4: flash LED, 15: PSRAM, 14/15: DMA for SD card ???
gpio_num_t gpio_num = resolvePinNr(gpionum);
if (gpio_num == GPIO_NUM_NC)
{
std::string zw = "GPIO" + std::to_string(gpionum) + " not support - only 12 & 13 free";
httpd_resp_sendstr_chunk(req, zw.c_str());
httpd_resp_sendstr_chunk(req, NULL);
return ESP_OK;
return ESP_OK;
}
if (status == "HIGH")
gpio_set_level(gpio_num, 1);
if (gpioMap->count(gpio_num) == 0) {
char resp_str [30];
sprintf(resp_str, "GPIO%d is not registred", gpio_num);
httpd_resp_send(req, resp_str, strlen(resp_str));
return ESP_OK;
}
if (status == "")
{
std::string resp_str = "";
status = (*gpioMap)[gpio_num]->getValue(&resp_str) ? "HIGH" : "LOW";
if (resp_str == "") {
resp_str = status;
}
httpd_resp_sendstr_chunk(req, resp_str.c_str());
httpd_resp_sendstr_chunk(req, NULL);
}
else
gpio_set_level(gpio_num, 0);
zw = "GPIO" + std::to_string(gpionum) + " switched to " + status;
httpd_resp_sendstr_chunk(req, zw.c_str());
httpd_resp_sendstr_chunk(req, NULL);
{
std::string resp_str = "";
(*gpioMap)[gpio_num]->setValue((status == "HIGH") || (status == "TRUE") || (status == "1"), GPIO_SET_SOURCE_HTTP, &resp_str);
if (resp_str == "") {
resp_str = "GPIO" + std::to_string(gpionum) + " switched to " + status;
}
httpd_resp_sendstr_chunk(req, resp_str.c_str());
httpd_resp_sendstr_chunk(req, NULL);
}
return ESP_OK;
};
void initGPIO()
void GpioHandler::flashLightEnable(bool value)
{
gpio_config_t io_conf;
//disable interrupt
io_conf.intr_type = GPIO_INTR_DISABLE;
//set as output mode
io_conf.mode = GPIO_MODE_OUTPUT;
//bit mask of the pins that you want to set,e.g.GPIO18/19
// io_conf.pin_bit_mask = ((1ULL<<GPIO_OUTPUT_IO_0) | (1ULL<<GPIO_OUTPUT_IO_1));
// io_conf.pin_bit_mask = ((1ULL << GPIO_NUM_12) | (1ULL << GPIO_NUM_2) | (1ULL << GPIO_NUM_4) | (1ULL << GPIO_NUM_12) | (1ULL << GPIO_NUM_13) | (1ULL << GPIO_NUM_14) | (1ULL << GPIO_NUM_15));
io_conf.pin_bit_mask = ((1ULL << GPIO_NUM_12) | (1ULL << GPIO_NUM_13));
//disable pull-down mode
io_conf.pull_down_en = (gpio_pulldown_t) 0;
//disable pull-up mode
io_conf.pull_up_en = (gpio_pullup_t) 0;
//configure GPIO with the given settings
gpio_config(&io_conf);
ESP_LOGD(TAG_SERVERGPIO, "GpioHandler::flashLightEnable %s\r\n", value ? "true" : "false");
if (gpioMap != NULL) {
for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it)
{
if (it->second->getMode() == GPIO_PIN_MODE_BUILT_IN_FLASH_LED) //|| (it->second->getMode() == GPIO_PIN_MODE_EXTERNAL_FLASH_PWM) || (it->second->getMode() == GPIO_PIN_MODE_EXTERNAL_FLASH_WS281X))
{
std::string resp_str = "";
it->second->setValue(value, GPIO_SET_SOURCE_INTERNAL, &resp_str);
if (resp_str == "") {
ESP_LOGD(TAG_SERVERGPIO, "Flash light pin GPIO %d switched to %s\r\n", (int)it->first, (value ? "on" : "off"));
} else {
ESP_LOGE(TAG_SERVERGPIO, "Can't set flash light pin GPIO %d. Error: %s\r\n", (int)it->first, resp_str.c_str());
}
}
}
}
}
void register_server_GPIO_uri(httpd_handle_t server)
gpio_num_t GpioHandler::resolvePinNr(uint8_t pinNr)
{
ESP_LOGI(TAGPARTGPIO, "server_GPIO - Registering URI handlers");
httpd_uri_t camuri = { };
camuri.method = HTTP_GET;
camuri.uri = "/GPIO";
camuri.handler = handler_switch_GPIO;
camuri.user_ctx = (void*) "switch GPIO";
httpd_register_uri_handler(server, &camuri);
initGPIO();
switch(pinNr) {
case 0:
return GPIO_NUM_0;
case 1:
return GPIO_NUM_1;
case 3:
return GPIO_NUM_3;
case 4:
return GPIO_NUM_4;
case 12:
return GPIO_NUM_12;
case 13:
return GPIO_NUM_13;
default:
return GPIO_NUM_NC;
}
}
gpio_pin_mode_t GpioHandler::resolvePinMode(std::string input)
{
if( input == "disabled" ) return GPIO_PIN_MODE_DISABLED;
if( input == "input" ) return GPIO_PIN_MODE_INPUT;
if( input == "input-pullup" ) return GPIO_PIN_MODE_INPUT_PULLUP;
if( input == "input-pulldown" ) return GPIO_PIN_MODE_INPUT_PULLDOWN;
if( input == "output" ) return GPIO_PIN_MODE_OUTPUT;
if( input == "built-in-led" ) return GPIO_PIN_MODE_BUILT_IN_FLASH_LED;
if( input == "output-pwm" ) return GPIO_PIN_MODE_OUTPUT_PWM;
if( input == "external-flash-pwm" ) return GPIO_PIN_MODE_EXTERNAL_FLASH_PWM;
if( input == "external-flash-ws281x" ) return GPIO_PIN_MODE_EXTERNAL_FLASH_WS281X;
return GPIO_PIN_MODE_DISABLED;
}
gpio_int_type_t GpioHandler::resolveIntType(std::string input)
{
if( input == "disabled" ) return GPIO_INTR_DISABLE;
if( input == "rising-edge" ) return GPIO_INTR_POSEDGE;
if( input == "falling-edge" ) return GPIO_INTR_NEGEDGE;
if( input == "rising-and-falling" ) return GPIO_INTR_ANYEDGE ;
if( input == "low-level-trigger" ) return GPIO_INTR_LOW_LEVEL;
if( input == "high-level-trigger" ) return GPIO_INTR_HIGH_LEVEL;
return GPIO_INTR_DISABLE;
}
static GpioHandler *gpioHandler = NULL;
void gpio_handler_create(httpd_handle_t server)
{
if (gpioHandler == NULL)
gpioHandler = new GpioHandler(CONFIG_FILE, server);
}
void gpio_handler_init()
{
if (gpioHandler != NULL) {
gpioHandler->init();
}
}
void gpio_handler_deinit() {
if (gpioHandler != NULL) {
gpioHandler->deinit();
}
}
void gpio_handler_destroy()
{
if (gpioHandler != NULL) {
delete gpioHandler;
gpioHandler = NULL;
}
}
GpioHandler* gpio_handler_get()
{
return gpioHandler;
}


@@ -1,10 +1,99 @@
#ifndef SERVER_GPIO_H
#define SERVER_GPIO_H
#include <esp_log.h>
#include <esp_http_server.h>
#include <map>
#include "driver/gpio.h"
//#include "ClassControllCamera.h"
static const char *TAGPARTGPIO = "server_GPIO";
typedef enum {
GPIO_PIN_MODE_DISABLED = 0x0,
GPIO_PIN_MODE_INPUT = 0x1,
GPIO_PIN_MODE_INPUT_PULLUP = 0x2,
GPIO_PIN_MODE_INPUT_PULLDOWN = 0x3,
GPIO_PIN_MODE_OUTPUT = 0x4,
GPIO_PIN_MODE_BUILT_IN_FLASH_LED = 0x5,
GPIO_PIN_MODE_OUTPUT_PWM = 0x6,
GPIO_PIN_MODE_EXTERNAL_FLASH_PWM = 0x7,
GPIO_PIN_MODE_EXTERNAL_FLASH_WS281X = 0x8,
} gpio_pin_mode_t;
void register_server_GPIO_uri(httpd_handle_t server);
struct GpioResult {
gpio_num_t gpio;
int value;
};
typedef enum {
GPIO_SET_SOURCE_INTERNAL = 0,
GPIO_SET_SOURCE_MQTT = 1,
GPIO_SET_SOURCE_HTTP = 2,
} gpio_set_source;
class GpioPin {
public:
GpioPin(gpio_num_t gpio, const char* name, gpio_pin_mode_t mode, gpio_int_type_t interruptType, uint8_t dutyResolution, std::string mqttTopic, bool httpEnable);
~GpioPin();
void init();
bool getValue(std::string* errorText);
void setValue(bool value, gpio_set_source setSource, std::string* errorText);
bool handleMQTT(std::string, char* data, int data_len);
void publishState();
void gpioInterrupt(int value);
gpio_int_type_t getInterruptType() { return _interruptType; }
gpio_pin_mode_t getMode() { return _mode; }
private:
gpio_num_t _gpio;
const char* _name;
gpio_pin_mode_t _mode;
gpio_int_type_t _interruptType;
std::string _mqttTopic;
int currentState = -1;
};
esp_err_t callHandleHttpRequest(httpd_req_t *req);
void taskGpioHandler(void *pvParameter);
class GpioHandler {
public:
GpioHandler(std::string configFile, httpd_handle_t httpServer);
~GpioHandler();
void init();
void deinit();
void registerGpioUri();
esp_err_t handleHttpRequest(httpd_req_t *req);
void taskHandler();
void gpioInterrupt(GpioResult* gpioResult);
void flashLightEnable(bool value);
bool isEnabled() { return _isEnabled; }
void handleMQTTconnect();
private:
std::string _configFile;
httpd_handle_t _httpServer;
std::map<gpio_num_t, GpioPin*> *gpioMap = NULL;
TaskHandle_t xHandleTaskGpio = NULL;
bool _isEnabled = false;
bool readConfig();
void clear();
gpio_num_t resolvePinNr(uint8_t pinNr);
gpio_pin_mode_t resolvePinMode(std::string input);
gpio_int_type_t resolveIntType(std::string input);
};
void gpio_handler_create(httpd_handle_t server);
void gpio_handler_init();
void gpio_handler_deinit();
void gpio_handler_destroy();
GpioHandler* gpio_handler_get();
#endif //SERVER_GPIO_H
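The free gpio_handler_* functions at the end of this header manage one global GpioHandler instance. A minimal sketch of wiring them into application startup follows, assuming the HTTP server is already running; the wrapper function name is an assumption, and the config path comes from CONFIG_FILE inside gpio_handler_create().
// Sketch only: bring the GPIO subsystem up once the HTTP server handle exists.
void startGpioSubsystem(httpd_handle_t server)
{
    gpio_handler_create(server);   // constructs the handler and registers the /GPIO URI
    gpio_handler_init();           // reads the [GPIO] config paragraph and configures the pins

    GpioHandler* handler = gpio_handler_get();
    if ((handler != NULL) && handler->isEnabled()) {
        handler->flashLightEnable(true);    // e.g. briefly pulse a pin configured as built-in-led
        handler->flashLightEnable(false);
    }
}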


@@ -4,6 +4,6 @@ list(APPEND EXTRA_COMPONENT_DIRS $ENV{IDF_PATH}/examples/common_components/proto
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc nvs_flash)
REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc nvs_flash jomjol_fileserver_ota jomjol_controlGPIO)


@@ -9,11 +9,14 @@
#include "Helper.h"
#include "CImageBasis.h"
#include "server_ota.h"
#include "server_GPIO.h"
#define BOARD_ESP32CAM_AITHINKER
#include <esp_event_loop.h>
#include <esp_event.h>
#include <esp_log.h>
#include <esp_system.h>
#include <nvs_flash.h>
@@ -48,7 +51,7 @@
#define CAM_PIN_HREF 23
#define CAM_PIN_PCLK 22
static const char *TAG = "example:take_picture";
static const char *TAGCAMERACLASS = "server_part_camera";
static camera_config_t camera_config = {
.pin_pwdn = CAM_PIN_PWDN,
@@ -220,13 +223,17 @@ void CCamera::SetQualitySize(int qual, framesize_t resol)
void CCamera::EnableAutoExposure(int flashdauer)
{
LEDOnOff(true);
LightOnOff(true);
if (flashdauer > 0)
LightOnOff(true);
const TickType_t xDelay = flashdauer / portTICK_PERIOD_MS;
vTaskDelay( xDelay );
camera_fb_t * fb = esp_camera_fb_get();
if (!fb) {
ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
LEDOnOff(false);
LightOnOff(false);
doReboot();
}
esp_camera_fb_return(fb);
@@ -269,8 +276,11 @@ esp_err_t CCamera::CaptureToBasisImage(CImageBasis *_Image, int delay)
camera_fb_t * fb = esp_camera_fb_get();
if (!fb) {
ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
ESP_LOGE(TAGCAMERACLASS, "CaptureToBasisImage: Camera Capture Failed");
LEDOnOff(false);
LightOnOff(false);
doReboot();
return ESP_FAIL;
}
@@ -353,8 +363,11 @@ esp_err_t CCamera::CaptureToFile(std::string nm, int delay)
camera_fb_t * fb = esp_camera_fb_get();
if (!fb) {
ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
ESP_LOGE(TAGCAMERACLASS, "CaptureToFile: Camera Capture Failed");
LEDOnOff(false);
LightOnOff(false);
doReboot();
return ESP_FAIL;
}
LEDOnOff(false);
@@ -443,7 +456,11 @@ esp_err_t CCamera::CaptureToHTTP(httpd_req_t *req, int delay)
fb = esp_camera_fb_get();
if (!fb) {
ESP_LOGE(TAGCAMERACLASS, "Camera capture failed");
LEDOnOff(false);
LightOnOff(false);
httpd_resp_send_500(req);
// doReboot();
return ESP_FAIL;
}
@@ -480,15 +497,20 @@ esp_err_t CCamera::CaptureToHTTP(httpd_req_t *req, int delay)
void CCamera::LightOnOff(bool status)
{
// Init the GPIO
gpio_pad_select_gpio(FLASH_GPIO);
/* Set the GPIO as a push/pull output */
gpio_set_direction(FLASH_GPIO, GPIO_MODE_OUTPUT);
GpioHandler* gpioHandler = gpio_handler_get();
if ((gpioHandler != NULL) && (gpioHandler->isEnabled())) {
gpioHandler->flashLightEnable(status);
} else {
// Init the GPIO
gpio_pad_select_gpio(FLASH_GPIO);
/* Set the GPIO as a push/pull output */
gpio_set_direction(FLASH_GPIO, GPIO_MODE_OUTPUT);
if (status)
gpio_set_level(FLASH_GPIO, 1);
else
gpio_set_level(FLASH_GPIO, 0);
if (status)
gpio_set_level(FLASH_GPIO, 1);
else
gpio_set_level(FLASH_GPIO, 0);
}
}
void CCamera::LEDOnOff(bool status)


@@ -16,9 +16,6 @@
#define CAMERA_MODEL_AI_THINKER
static const char *TAGCAMERACLASS = "server_part_camera";
class CCamera {
protected:
int ActualQuality;


@@ -12,14 +12,17 @@
char scratch2[SCRATCH_BUFSIZE2];
//#define DEBUG_DETAIL_ON
static const char *TAGPARTCAMERA = "server_camera";
void PowerResetCamera(){
ESP_LOGD(TAGPARTCAMERA, "Resetting camera by power down line");
gpio_config_t conf = { 0 };
gpio_config_t conf;
conf.intr_type = GPIO_INTR_DISABLE;
conf.pin_bit_mask = 1LL << GPIO_NUM_32;
conf.mode = GPIO_MODE_OUTPUT;
conf.pull_down_en = GPIO_PULLDOWN_DISABLE;
conf.pull_up_en = GPIO_PULLUP_DISABLE;
gpio_config(&conf);
// careful, the logic is inverted compared to the reset pin


@@ -7,8 +7,6 @@
//#include "ClassControllCamera.h"
static const char *TAGPARTCAMERA = "server_camera";
void register_server_camera_uri(httpd_handle_t server);
void PowerResetCamera();


@@ -1,7 +1,7 @@
FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES tfmicro esp_http_server app_update esp_http_client nvs_flash jomjol_tfliteclass jomjol_flowcontroll spiffs jomjol_helper)
INCLUDE_DIRS "." "../../include"
REQUIRES tfmicro esp_http_server app_update esp_http_client nvs_flash jomjol_tfliteclass jomjol_flowcontroll spiffs jomjol_helper jomjol_controlGPIO)


@@ -26,9 +26,12 @@
#include <esp_spiffs.h>
#include "esp_http_server.h"
#include "defines.h"
#include "ClassLogFile.h"
#include "server_help.h"
#include "interface_mqtt.h"
#include "server_GPIO.h"
#include "Helper.h"
#include "miniz.h"
@@ -53,17 +56,17 @@ struct file_server_data {
char scratch[SCRATCH_BUFSIZE];
};
static const char *TAG = "file_server";
static const char *TAG_FILESERVER = "file_server";
/* Handler to redirect incoming GET request for /index.html to /
* This can be overridden by uploading file with same name */
static esp_err_t index_html_get_handler(httpd_req_t *req)
{
httpd_resp_set_status(req, "307 Temporary Redirect");
httpd_resp_set_hdr(req, "Location", "/");
httpd_resp_send(req, NULL, 0); // Response body can be empty
return ESP_OK;
}
// static esp_err_t index_html_get_handler(httpd_req_t *req)
// {
// httpd_resp_set_status(req, "307 Temporary Redirect");
// httpd_resp_set_hdr(req, "Location", "/");
// httpd_resp_send(req, NULL, 0); // Response body can be empty
// return ESP_OK;
// }
/* Send HTTP response with a run-time generated html consisting of
* a list of all files and folders under the requested path.
@@ -95,7 +98,7 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const
printf("entrypath: <%s>\n", entrypath);
if (!dir) {
ESP_LOGE(TAG, "Failed to stat dir : %s", dirpath);
ESP_LOGE(TAG_FILESERVER, "Failed to stat dir : %s", dirpath);
/* Respond with 404 Not Found */
httpd_resp_send_err(req, HTTPD_404_NOT_FOUND, "Directory does not exist");
return ESP_FAIL;
@@ -115,7 +118,7 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const
if (chunksize > 0){
if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
fclose(fd);
ESP_LOGE(TAG, "File sending failed!");
ESP_LOGE(TAG_FILESERVER, "File sending failed!");
return ESP_FAIL;
}
}
@@ -154,11 +157,11 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const
strlcpy(entrypath + dirpath_len, entry->d_name, sizeof(entrypath) - dirpath_len);
printf("Entrypath: %s\n", entrypath);
if (stat(entrypath, &entry_stat) == -1) {
ESP_LOGE(TAG, "Failed to stat %s : %s", entrytype, entry->d_name);
ESP_LOGE(TAG_FILESERVER, "Failed to stat %s : %s", entrytype, entry->d_name);
continue;
}
sprintf(entrysize, "%ld", entry_stat.st_size);
ESP_LOGI(TAG, "Found %s : %s (%s bytes)", entrytype, entry->d_name, entrysize);
ESP_LOGI(TAG_FILESERVER, "Found %s : %s (%s bytes)", entrytype, entry->d_name, entrysize);
/* Send chunk of HTML file containing table entries with file name and size */
httpd_resp_sendstr_chunk(req, "<tr><td><a href=\"");
@@ -206,19 +209,19 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)
LogFile.WriteToFile("logfileact_get_handler");
char filepath[FILE_PATH_MAX];
FILE *fd = NULL;
struct stat file_stat;
//struct stat file_stat;
printf("uri: %s\n", req->uri);
const char filename = 'log_current.txt';
const char* filename = "log_current.txt";
printf("uri: %s, filename: %s, filepath: %s\n", req->uri, &filename, filepath);
printf("uri: %s, filename: %s, filepath: %s\n", req->uri, filename, filepath);
std::string currentfilename = LogFile.GetCurrentFileName();
fd = OpenFileAndWait(currentfilename.c_str(), "r");
if (!fd) {
ESP_LOGE(TAG, "Failed to read existing file : %s", filepath);
ESP_LOGE(TAG_FILESERVER, "Failed to read existing file : %s", filepath);
/* Respond with 500 Internal Server Error */
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to read existing file");
return ESP_FAIL;
@@ -226,8 +229,8 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)
httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
// ESP_LOGI(TAG, "Sending file : %s (%ld bytes)...", &filename, file_stat.st_size);
set_content_type_from_file(req, &filename);
// ESP_LOGI(TAG_FILESERVER, "Sending file : %s (%ld bytes)...", &filename, file_stat.st_size);
set_content_type_from_file(req, filename);
/* Retrieve the pointer to scratch buffer for temporary storage */
char *chunk = ((struct file_server_data *)req->user_ctx)->scratch;
@@ -239,7 +242,7 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)
/* Send the buffer contents as HTTP response chunk */
if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
fclose(fd);
ESP_LOGE(TAG, "File sending failed!");
ESP_LOGE(TAG_FILESERVER, "File sending failed!");
/* Abort sending file */
httpd_resp_sendstr_chunk(req, NULL);
/* Respond with 500 Internal Server Error */
@@ -252,7 +255,7 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)
/* Close file after sending complete */
fclose(fd);
ESP_LOGI(TAG, "File sending complete");
ESP_LOGI(TAG_FILESERVER, "File sending complete");
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
@@ -284,7 +287,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
if (!filename) {
ESP_LOGE(TAG, "Filename is too long");
ESP_LOGE(TAG_FILESERVER, "Filename is too long");
/* Respond with 500 Internal Server Error */
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Filename too long");
return ESP_FAIL;
@@ -297,11 +300,11 @@ static esp_err_t download_get_handler(httpd_req_t *req)
if (buf_len > 1) {
char buf[buf_len];
if (httpd_req_get_url_query_str(req, buf, buf_len) == ESP_OK) {
ESP_LOGI(TAG, "Found URL query => %s", buf);
ESP_LOGI(TAG_FILESERVER, "Found URL query => %s", buf);
char param[32];
/* Get value of expected key from query string */
if (httpd_query_key_value(buf, "readonly", param, sizeof(param)) == ESP_OK) {
ESP_LOGI(TAG, "Found URL query parameter => readonly=%s", param);
ESP_LOGI(TAG_FILESERVER, "Found URL query parameter => readonly=%s", param);
readonly = param && strcmp(param,"true")==0;
}
}
@@ -316,7 +319,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
/* If file not present on SPIFFS check if URI
* corresponds to one of the hardcoded paths */
ESP_LOGE(TAG, "Failed to stat file : %s", filepath);
ESP_LOGE(TAG_FILESERVER, "Failed to stat file : %s", filepath);
/* Respond with 404 Not Found */
httpd_resp_send_err(req, HTTPD_404_NOT_FOUND, "File does not exist");
return ESP_FAIL;
@@ -324,7 +327,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
fd = OpenFileAndWait(filepath, "r");
if (!fd) {
ESP_LOGE(TAG, "Failed to read existing file : %s", filepath);
ESP_LOGE(TAG_FILESERVER, "Failed to read existing file : %s", filepath);
/* Respond with 500 Internal Server Error */
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to read existing file");
return ESP_FAIL;
@@ -332,7 +335,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
ESP_LOGI(TAG, "Sending file : %s (%ld bytes)...", filename, file_stat.st_size);
ESP_LOGI(TAG_FILESERVER, "Sending file : %s (%ld bytes)...", filename, file_stat.st_size);
set_content_type_from_file(req, filename);
/* Retrieve the pointer to scratch buffer for temporary storage */
@@ -345,7 +348,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
/* Send the buffer contents as HTTP response chunk */
if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
fclose(fd);
ESP_LOGE(TAG, "File sending failed!");
ESP_LOGE(TAG_FILESERVER, "File sending failed!");
/* Abort sending file */
httpd_resp_sendstr_chunk(req, NULL);
/* Respond with 500 Internal Server Error */
@@ -358,7 +361,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
/* Close file after sending complete */
fclose(fd);
ESP_LOGI(TAG, "File sending complete");
ESP_LOGI(TAG_FILESERVER, "File sending complete");
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
@@ -385,13 +388,13 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
/* Filename cannot have a trailing '/' */
if (filename[strlen(filename) - 1] == '/') {
ESP_LOGE(TAG, "Invalid filename : %s", filename);
ESP_LOGE(TAG_FILESERVER, "Invalid filename : %s", filename);
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Invalid filename");
return ESP_FAIL;
}
if (stat(filepath, &file_stat) == 0) {
ESP_LOGE(TAG, "File already exists : %s", filepath);
ESP_LOGE(TAG_FILESERVER, "File already exists : %s", filepath);
/* Respond with 400 Bad Request */
httpd_resp_send_err(req, HTTPD_400_BAD_REQUEST, "File already exists");
return ESP_FAIL;
@@ -399,7 +402,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
/* File cannot be larger than a limit */
if (req->content_len > MAX_FILE_SIZE) {
ESP_LOGE(TAG, "File too large : %d bytes", req->content_len);
ESP_LOGE(TAG_FILESERVER, "File too large : %d bytes", req->content_len);
/* Respond with 400 Bad Request */
httpd_resp_send_err(req, HTTPD_400_BAD_REQUEST,
"File size must be less than "
@@ -411,13 +414,13 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
fd = OpenFileAndWait(filepath, "w");
if (!fd) {
ESP_LOGE(TAG, "Failed to create file : %s", filepath);
ESP_LOGE(TAG_FILESERVER, "Failed to create file : %s", filepath);
/* Respond with 500 Internal Server Error */
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to create file");
return ESP_FAIL;
}
ESP_LOGI(TAG, "Receiving file : %s...", filename);
ESP_LOGI(TAG_FILESERVER, "Receiving file : %s...", filename);
/* Retrieve the pointer to scratch buffer for temporary storage */
char *buf = ((struct file_server_data *)req->user_ctx)->scratch;
@@ -429,7 +432,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
while (remaining > 0) {
ESP_LOGI(TAG, "Remaining size : %d", remaining);
ESP_LOGI(TAG_FILESERVER, "Remaining size : %d", remaining);
/* Receive the file part by part into a buffer */
if ((received = httpd_req_recv(req, buf, MIN(remaining, SCRATCH_BUFSIZE))) <= 0) {
if (received == HTTPD_SOCK_ERR_TIMEOUT) {
@@ -442,7 +445,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
fclose(fd);
unlink(filepath);
ESP_LOGE(TAG, "File reception failed!");
ESP_LOGE(TAG_FILESERVER, "File reception failed!");
/* Respond with 500 Internal Server Error */
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to receive file");
return ESP_FAIL;
@@ -455,7 +458,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
fclose(fd);
unlink(filepath);
ESP_LOGE(TAG, "File write failed!");
ESP_LOGE(TAG_FILESERVER, "File write failed!");
/* Respond with 500 Internal Server Error */
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to write file to storage");
return ESP_FAIL;
@@ -468,7 +471,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
/* Close file upon upload completion */
fclose(fd);
ESP_LOGI(TAG, "File reception complete");
ESP_LOGI(TAG_FILESERVER, "File reception complete");
std::string directory = std::string(filepath);
size_t zw = directory.find("/");
@@ -483,10 +486,10 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
int start_fn = strlen(((struct file_server_data *)req->user_ctx)->base_path);
printf("Directory: %s, start_fn: %d, found: %d\n", directory.c_str(), start_fn, found);
directory = directory.substr(start_fn, found - start_fn + 1);
printf("Directory danach: %s\n", directory.c_str());
printf("Directory danach 1: %s\n", directory.c_str());
directory = "/fileserver" + directory;
printf("Directory danach: %s\n", directory.c_str());
printf("Directory danach 2: %s\n", directory.c_str());
/* Redirect onto root to see the updated file list */
httpd_resp_set_status(req, "303 See Other");
@@ -496,6 +499,15 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
httpd_resp_set_status(req, "303 See Other");
httpd_resp_set_hdr(req, "Location", directory.c_str());
httpd_resp_sendstr(req, "File uploaded successfully");
/*
if (strcmp(filepath, CONFIG_FILE) == 0) {
printf("New config found. Reload handler.");
gpio_handler_deinit();
MQTTdestroy();
}
*/
return ESP_OK;
}
@@ -567,19 +579,19 @@ static esp_err_t delete_post_handler(httpd_req_t *req)
/* Filename cannot have a trailing '/' */
if (filename[strlen(filename) - 1] == '/') {
ESP_LOGE(TAG, "Invalid filename : %s", filename);
ESP_LOGE(TAG_FILESERVER, "Invalid filename : %s", filename);
httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Invalid filename");
return ESP_FAIL;
}
if (stat(filepath, &file_stat) == -1) {
ESP_LOGE(TAG, "File does not exist : %s", filename);
ESP_LOGE(TAG_FILESERVER, "File does not exist : %s", filename);
/* Respond with 400 Bad Request */
httpd_resp_send_err(req, HTTPD_400_BAD_REQUEST, "File does not exist");
return ESP_FAIL;
}
ESP_LOGI(TAG, "Deleting file : %s", filename);
ESP_LOGI(TAG_FILESERVER, "Deleting file : %s", filename);
/* Delete file */
unlink(filepath);
@@ -596,10 +608,10 @@ static esp_err_t delete_post_handler(httpd_req_t *req)
int start_fn = strlen(((struct file_server_data *)req->user_ctx)->base_path);
printf("Directory: %s, start_fn: %d, found: %d\n", directory.c_str(), start_fn, found);
directory = directory.substr(start_fn, found - start_fn + 1);
printf("Directory danach: %s\n", directory.c_str());
printf("Directory danach 3: %s\n", directory.c_str());
directory = "/fileserver" + directory;
printf("Directory danach: %s\n", directory.c_str());
printf("Directory danach 4: %s\n", directory.c_str());
}
@@ -623,7 +635,7 @@ void delete_all_in_directory(std::string _directory)
std::string filename;
if (!dir) {
ESP_LOGE(TAG, "Failed to stat dir : %s", _directory.c_str());
ESP_LOGE(TAG_FILESERVER, "Failed to stat dir : %s", _directory.c_str());
return;
}
@@ -632,7 +644,7 @@ void delete_all_in_directory(std::string _directory)
if (!(entry->d_type == DT_DIR)){
if (strcmp("wlan.ini", entry->d_name) != 0){ // auf wlan.ini soll nicht zugegriffen werden !!!
filename = _directory + "/" + std::string(entry->d_name);
ESP_LOGI(TAG, "Deleting file : %s", filename.c_str());
ESP_LOGI(TAG_FILESERVER, "Deleting file : %s", filename.c_str());
/* Delete file */
unlink(filename.c_str());
}
@@ -722,19 +734,19 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path)
/* Validate file storage base path */
if (!base_path) {
// if (!base_path || strcmp(base_path, "/spiffs") != 0) {
ESP_LOGE(TAG, "File server base_path not set");
ESP_LOGE(TAG_FILESERVER, "File server base_path not set");
// return ESP_ERR_INVALID_ARG;
}
if (server_data) {
ESP_LOGE(TAG, "File server already started");
ESP_LOGE(TAG_FILESERVER, "File server already started");
// return ESP_ERR_INVALID_STATE;
}
/* Allocate memory for server data */
server_data = (file_server_data *) calloc(1, sizeof(struct file_server_data));
if (!server_data) {
ESP_LOGE(TAG, "Failed to allocate memory for server data");
ESP_LOGE(TAG_FILESERVER, "Failed to allocate memory for server data");
// return ESP_ERR_NO_MEM;
}
strlcpy(server_data->base_path, base_path,


@@ -107,6 +107,8 @@ esp_err_t set_content_type_from_file(httpd_req_t *req, const char *filename)
return httpd_resp_set_type(req, "text/html");
} else if (IS_FILE_EXT(filename, ".jpeg")) {
return httpd_resp_set_type(req, "image/jpeg");
} else if (IS_FILE_EXT(filename, ".jpg")) {
return httpd_resp_set_type(req, "image/jpeg");
} else if (IS_FILE_EXT(filename, ".ico")) {
return httpd_resp_set_type(req, "image/x-icon");
}


@@ -12,7 +12,7 @@
#include "freertos/task.h"
#include "esp_system.h"
#include "esp_event.h"
#include "esp_event_loop.h"
#include "esp_event.h"
#include "esp_log.h"
#include <esp_ota_ops.h>
#include "esp_http_client.h"
@@ -28,6 +28,7 @@
#include "server_tflite.h"
#include "server_file.h"
#include "server_GPIO.h"
#include "ClassLogFile.h"
@@ -46,6 +47,7 @@ static char ota_write_data[BUFFSIZE + 1] = { 0 };
#define OTA_URL_SIZE 256
static const char *TAGPARTOTA = "server_ota";
static void infinite_loop(void)
@@ -60,14 +62,14 @@ static void infinite_loop(void)
static bool ota_example_task(std::string fn)
static bool ota_update_task(std::string fn)
{
esp_err_t err;
/* update handle : set by esp_ota_begin(), must be freed via esp_ota_end() */
esp_ota_handle_t update_handle = 0 ;
const esp_partition_t *update_partition = NULL;
ESP_LOGI(TAGPARTOTA, "Starting OTA example");
ESP_LOGI(TAGPARTOTA, "Starting OTA update");
const esp_partition_t *configured = esp_ota_get_boot_partition();
const esp_partition_t *running = esp_ota_get_running_partition();
@@ -374,7 +376,9 @@ esp_err_t handler_ota_update(httpd_req_t *req)
const char* resp_str;
if (ota_example_task(fn))
KillTFliteTasks();
gpio_handler_deinit();
if (ota_update_task(fn))
{
resp_str = "Firmware Update Successfull!<br><br>You can restart now.";
}
@@ -400,8 +404,6 @@ void hard_restart() {
void task_reboot(void *pvParameter)
{
while(1)
{
vTaskDelay(5000 / portTICK_PERIOD_MS);
@@ -413,12 +415,14 @@ void task_reboot(void *pvParameter)
}
void doReboot(){
LogFile.WriteToFile("Reboot - now");
KillTFliteTasks();
ESP_LOGI(TAGPARTOTA, "Reboot in 5sec");
LogFile.WriteToFile("Reboot in 5sec");
xTaskCreate(&task_reboot, "reboot", configMINIMAL_STACK_SIZE * 64, NULL, 10, NULL);
// KillTFliteTasks(); // kills itself
gpio_handler_destroy();
vTaskDelay(5000 / portTICK_PERIOD_MS);
esp_restart();
hard_restart();
hard_restart();
}


@@ -4,9 +4,8 @@
//#include "ClassControllCamera.h"
static const char *TAGPARTOTA = "server_ota";
void register_server_ota_sdcard_uri(httpd_handle_t server);
void CheckOTAUpdate();
void doReboot();
void hard_restart();


@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES jomjol_tfliteclass jomjol_helper jomjol_controlcamera jomjol_mqtt jomjol_fileserver_ota jomjol_image_proc connect_wlan)
REQUIRES jomjol_tfliteclass jomjol_helper jomjol_controlcamera jomjol_mqtt jomjol_fileserver_ota jomjol_image_proc jomjol_wlan)


@@ -94,6 +94,23 @@ string ClassFlow::getReadout()
return string();
}
std::string ClassFlow::GetParameterName(std::string _input)
{
string _param;
int _pospunkt = _input.find_first_of(".");
if (_pospunkt > -1)
{
_param = _input.substr(_pospunkt+1, _input.length() - _pospunkt - 1);
}
else
{
_param = _input;
}
// printf("Parameter: %s, Pospunkt: %d\n", _param.c_str(), _pospunkt);
return _param;
}
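// Illustration (assumed input): GetParameterName("Analog.Model") returns "Model";
// an input without a dot, e.g. "Model", is returned unchanged.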
bool ClassFlow::getNextLine(FILE* pfile, string *rt)
{
char zw[1024];
@@ -102,24 +119,21 @@ bool ClassFlow::getNextLine(FILE* pfile, string *rt)
*rt = "";
return false;
}
fgets(zw, 1024, pfile);
printf("%s", zw);
if ((strlen(zw) == 0) && feof(pfile))
if (!fgets(zw, 1024, pfile))
{
*rt = "";
printf("END OF FILE\n");
return false;
}
printf("%s", zw);
*rt = zw;
*rt = trim(*rt);
while ((zw[0] == ';' || zw[0] == '#' || (rt->size() == 0)) && !(zw[1] == '[')) // skip comment lines (; or #) and empty lines, unless it is a new commented-out paragraph
{
fgets(zw, 1024, pfile);
printf("%s", zw);
if (feof(pfile))
{
*rt = "";
*rt = "";
if (!fgets(zw, 1024, pfile))
return false;
}
printf("%s", zw);
*rt = zw;
*rt = trim(*rt);
}


@@ -37,6 +37,8 @@ protected:
virtual void SetInitialParameter(void);
std::string GetParameterName(std::string _input);
bool disabled;
public:


@@ -19,6 +19,7 @@ void ClassFlowAlignment::SetInitialParameter(void)
initalrotate = 0;
anz_ref = 0;
initialmirror = false;
initialflip = false;
SaveAllFiles = false;
namerawimage = "/sdcard/img_tmp/raw.jpg";
FileStoreRefAlignment = "/sdcard/config/align.txt";
@@ -72,6 +73,11 @@ bool ClassFlowAlignment::ReadParameter(FILE* pfile, string& aktparamgraph)
while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
{
zerlegt = ZerlegeZeile(aktparamgraph);
if ((toUpper(zerlegt[0]) == "FLIPIMAGESIZE") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
initialflip = true;
}
if ((toUpper(zerlegt[0]) == "INITIALMIRROR") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
@@ -153,7 +159,13 @@ bool ClassFlowAlignment::doFlow(string time)
delete AlignAndCutImage;
AlignAndCutImage = new CAlignAndCutImage(ImageBasis, ImageTMP);
CRotateImage rt(AlignAndCutImage, ImageTMP);
CRotateImage rt(AlignAndCutImage, ImageTMP, initialflip);
if (initialflip)
{
int _zw = ImageBasis->height;
ImageBasis->height = ImageBasis->width;
ImageBasis->width = _zw;
}
if (initialmirror){
printf("do mirror\n");
@@ -161,7 +173,7 @@ bool ClassFlowAlignment::doFlow(string time)
if (SaveAllFiles) AlignAndCutImage->SaveToFile(FormatFileName("/sdcard/img_tmp/mirror.jpg"));
}
if (initalrotate != 0)
if ((initalrotate != 0) || initialflip)
{
rt.Rotate(initalrotate);
if (SaveAllFiles) AlignAndCutImage->SaveToFile(FormatFileName("/sdcard/img_tmp/rot.jpg"));
@@ -176,6 +188,12 @@ bool ClassFlowAlignment::doFlow(string time)
if (SaveAllFiles)
{
if (initialflip)
{
int _zw = ImageTMP->width;
ImageTMP->width = ImageTMP->height;
ImageTMP->height = _zw;
}
DrawRef(ImageTMP);
ImageTMP->SaveToFile(FormatFileName("/sdcard/img_tmp/alg_roi.jpg"));
}
@@ -209,7 +227,7 @@ void ClassFlowAlignment::SaveReferenceAlignmentValues()
time(&rawtime);
timeinfo = localtime(&rawtime);
strftime(buffer, 80, "%Y-%m-%d_%H-%M-%S", timeinfo);
strftime(buffer, 80, "%Y-%m-%dT%H:%M:%S", timeinfo);
zwtime = std::string(buffer);
}


@@ -15,6 +15,7 @@ class ClassFlowAlignment :
protected:
float initalrotate;
bool initialmirror;
bool initialflip;
RefInfo References[2];
int anz_ref;
string namerawimage;

View File

@@ -1,348 +0,0 @@
#include "ClassFlowAnalog.h"
#include <math.h>
#include <iomanip>
#include <sys/types.h>
// #define OHNETFLITE
#ifndef OHNETFLITE
#include "CTfLiteClass.h"
#endif
#include "ClassLogFile.h"
static const char* TAG = "flow_analog";
bool debugdetailanalog = false;
void ClassFlowAnalog::SetInitialParameter(void)
{
string cnnmodelfile = "";
modelxsize = 1;
modelysize = 1;
ListFlowControll = NULL;
previousElement = NULL;
SaveAllFiles = false;
disabled = false;
extendedResolution = false;
}
ClassFlowAnalog::ClassFlowAnalog(std::vector<ClassFlow*>* lfc) : ClassFlowImage(lfc, TAG)
{
SetInitialParameter();
ListFlowControll = lfc;
for (int i = 0; i < ListFlowControll->size(); ++i)
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowAlignment") == 0)
{
flowpostalignment = (ClassFlowAlignment*) (*ListFlowControll)[i];
}
}
}
int ClassFlowAnalog::AnzahlROIs()
{
int zw = ROI.size();
if (extendedResolution)
zw++;
return zw;
}
string ClassFlowAnalog::getReadout()
{
string result = "";
if (ROI.size() == 0)
return result;
float zahl = ROI[ROI.size() - 1]->result;
int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
int prev = -1;
prev = ZeigerEval(ROI[ROI.size() - 1]->result, prev);
result = std::to_string(prev);
if (extendedResolution)
result = result + std::to_string(ergebnis_nachkomma);
for (int i = ROI.size() - 2; i >= 0; --i)
{
prev = ZeigerEval(ROI[i]->result, prev);
result = std::to_string(prev) + result;
}
return result;
}
int ClassFlowAnalog::ZeigerEval(float zahl, int ziffer_vorgaenger)
{
int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
int ergebnis_vorkomma = ((int) floor(zahl)) % 10;
int ergebnis, ergebnis_rating;
if (ziffer_vorgaenger == -1)
return ergebnis_vorkomma % 10;
ergebnis_rating = ergebnis_nachkomma - ziffer_vorgaenger;
if (ergebnis_nachkomma >= 5)
ergebnis_rating-=5;
else
ergebnis_rating+=5;
ergebnis = (int) round(zahl);
if (ergebnis_rating < 0)
ergebnis-=1;
if (ergebnis == -1)
ergebnis+=10;
ergebnis = ergebnis % 10;
return ergebnis;
}
bool ClassFlowAnalog::ReadParameter(FILE* pfile, string& aktparamgraph)
{
std::vector<string> zerlegt;
aktparamgraph = trim(aktparamgraph);
if (aktparamgraph.size() == 0)
if (!this->GetNextParagraph(pfile, aktparamgraph))
return false;
if ((aktparamgraph.compare("[Analog]") != 0) && (aktparamgraph.compare(";[Analog]") != 0)) // Paragraph passt nich zu MakeImage
return false;
if (aktparamgraph[0] == ';')
{
disabled = true;
while (getNextLine(pfile, &aktparamgraph) && !isNewParagraph(aktparamgraph));
printf("[Analog] is disabled !!!\n");
return true;
}
while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
{
zerlegt = this->ZerlegeZeile(aktparamgraph);
if ((zerlegt[0] == "LogImageLocation") && (zerlegt.size() > 1))
{
this->LogImageLocation = "/sdcard" + zerlegt[1];
this->isLogImage = true;
}
if ((toUpper(zerlegt[0]) == "LOGFILERETENTIONINDAYS") && (zerlegt.size() > 1))
{
this->logfileRetentionInDays = std::stoi(zerlegt[1]);
}
if ((zerlegt[0] == "Model") && (zerlegt.size() > 1))
{
this->cnnmodelfile = zerlegt[1];
}
if ((zerlegt[0] == "ModelInputSize") && (zerlegt.size() > 2))
{
this->modelxsize = std::stoi(zerlegt[1]);
this->modelysize = std::stoi(zerlegt[2]);
}
if (zerlegt.size() >= 5)
{
roianalog* neuroi = new roianalog;
neuroi->name = zerlegt[0];
neuroi->posx = std::stoi(zerlegt[1]);
neuroi->posy = std::stoi(zerlegt[2]);
neuroi->deltax = std::stoi(zerlegt[3]);
neuroi->deltay = std::stoi(zerlegt[4]);
neuroi->result = -1;
neuroi->image = NULL;
neuroi->image_org = NULL;
ROI.push_back(neuroi);
}
if ((toUpper(zerlegt[0]) == "SAVEALLFILES") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
SaveAllFiles = true;
}
if ((toUpper(zerlegt[0]) == "EXTENDEDRESOLUTION") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
extendedResolution = true;
}
}
for (int i = 0; i < ROI.size(); ++i)
{
ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
ROI[i]->image_org = new CImageBasis(ROI[i]->deltax, ROI[i]->deltay, 3);
}
return true;
}
string ClassFlowAnalog::getHTMLSingleStep(string host)
{
string result, zw;
std::vector<HTMLInfo*> htmlinfo;
result = "<p>Found ROIs: </p> <p><img src=\"" + host + "/img_tmp/alg_roi.jpg\"></p>\n";
result = result + "Analog Pointers: <p> ";
htmlinfo = GetHTMLInfo();
for (int i = 0; i < htmlinfo.size(); ++i)
{
std::stringstream stream;
stream << std::fixed << std::setprecision(1) << htmlinfo[i]->val;
zw = stream.str();
result = result + "<img src=\"" + host + "/img_tmp/" + htmlinfo[i]->filename + "\"> " + zw;
delete htmlinfo[i];
}
htmlinfo.clear();
return result;
}
bool ClassFlowAnalog::doFlow(string time)
{
if (disabled)
return true;
if (!doAlignAndCut(time)){
return false;
};
if (debugdetailanalog) LogFile.WriteToFile("ClassFlowAnalog::doFlow nach Alignment");
doNeuralNetwork(time);
RemoveOldLogs();
return true;
}
bool ClassFlowAnalog::doAlignAndCut(string time)
{
if (disabled)
return true;
CAlignAndCutImage *caic = flowpostalignment->GetAlignAndCutImage();
for (int i = 0; i < ROI.size(); ++i)
{
printf("Analog %d - Align&Cut\n", i);
caic->CutAndSave(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, ROI[i]->image_org);
if (SaveAllFiles) ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".jpg"));
ROI[i]->image_org->Resize(modelxsize, modelysize, ROI[i]->image);
if (SaveAllFiles) ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".bmp"));
}
return true;
}
void ClassFlowAnalog::DrawROI(CImageBasis *_zw)
{
int r = 0;
int g = 255;
int b = 0;
for (int i = 0; i < ROI.size(); ++i)
{
_zw->drawRect(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, r, g, b, 1);
_zw->drawCircle((int) (ROI[i]->posx + ROI[i]->deltax/2), (int) (ROI[i]->posy + ROI[i]->deltay/2), (int) (ROI[i]->deltax/2), r, g, b, 2);
_zw->drawLine((int) (ROI[i]->posx + ROI[i]->deltax/2), (int) ROI[i]->posy, (int) (ROI[i]->posx + ROI[i]->deltax/2), (int) (ROI[i]->posy + ROI[i]->deltay), r, g, b, 2);
_zw->drawLine((int) ROI[i]->posx, (int) (ROI[i]->posy + ROI[i]->deltay/2), (int) ROI[i]->posx + ROI[i]->deltax, (int) (ROI[i]->posy + ROI[i]->deltay/2), r, g, b, 2);
}
}
bool ClassFlowAnalog::doNeuralNetwork(string time)
{
if (disabled)
return true;
string logPath = CreateLogFolder(time);
string input = "/sdcard/img_tmp/alg.jpg";
string ioresize = "/sdcard/img_tmp/resize.bmp";
string output;
input = FormatFileName(input);
#ifndef OHNETFLITE
CTfLiteClass *tflite = new CTfLiteClass;
string zwcnn = "/sdcard" + cnnmodelfile;
zwcnn = FormatFileName(zwcnn);
printf("%s\n", zwcnn.c_str()); // avoid passing a non-literal string as printf format
tflite->LoadModel(zwcnn);
tflite->MakeAllocate();
#endif
for (int i = 0; i < ROI.size(); ++i)
{
printf("Analog %d - TfLite\n", i);
ioresize = "/sdcard/img_tmp/ra" + std::to_string(i) + ".bmp";
ioresize = FormatFileName(ioresize);
float f1, f2;
f1 = 0; f2 = 0;
#ifndef OHNETFLITE
// LogFile.WriteToFile("ClassFlowAnalog::doNeuralNetwork vor CNN tflite->LoadInputImage(ioresize)");
// tflite->LoadInputImage(ioresize);
tflite->LoadInputImageBasis(ROI[i]->image);
tflite->Invoke();
if (debugdetailanalog) LogFile.WriteToFile("Nach Invoke");
f1 = tflite->GetOutputValue(0);
f2 = tflite->GetOutputValue(1);
#endif
float result = fmod(atan2(f1, f2) / (M_PI * 2) + 2, 1);
// printf("Result sin, cos, ziffer: %f, %f, %f\n", f1, f2, result);
ROI[i]->result = result * 10;
printf("Result Analog%i: %f\n", i, ROI[i]->result);
if (isLogImage)
{
LogImage(logPath, ROI[i]->name, &ROI[i]->result, NULL, time, ROI[i]->image_org);
}
}
#ifndef OHNETFLITE
delete tflite;
#endif
return true;
}
std::vector<HTMLInfo*> ClassFlowAnalog::GetHTMLInfo()
{
std::vector<HTMLInfo*> result;
for (int i = 0; i < ROI.size(); ++i)
{
HTMLInfo *zw = new HTMLInfo;
zw->filename = ROI[i]->name + ".bmp";
zw->filename_org = ROI[i]->name + ".jpg";
zw->val = ROI[i]->result;
zw->image = ROI[i]->image;
zw->image_org = ROI[i]->image_org;
result.push_back(zw);
}
return result;
}


@@ -1,48 +0,0 @@
#pragma once
#include "ClassFlowImage.h"
#include "ClassFlowAlignment.h"
// #include "CTfLiteClass.h"
struct roianalog {
int posx, posy, deltax, deltay;
float result;
CImageBasis *image, *image_org;
string name;
};
class ClassFlowAnalog :
public ClassFlowImage
{
protected:
std::vector<roianalog*> ROI;
string cnnmodelfile;
int modelxsize, modelysize;
int ZeigerEval(float zahl, int ziffer_vorgaenger);
bool SaveAllFiles;
ClassFlowAlignment* flowpostalignment;
void SetInitialParameter(void);
public:
bool extendedResolution;
ClassFlowAnalog(std::vector<ClassFlow*>* lfc);
bool ReadParameter(FILE* pfile, string& aktparamgraph);
bool doFlow(string time);
string getHTMLSingleStep(string host);
string getReadout();
void DrawROI(CImageBasis *_zw);
bool doNeuralNetwork(string time);
bool doAlignAndCut(string time);
std::vector<HTMLInfo*> GetHTMLInfo();
int AnzahlROIs();
string name(){return "ClassFlowAnalog";};
};


@@ -0,0 +1,650 @@
#include "ClassFlowCNNGeneral.h"
#include <math.h>
#include <iomanip>
#include <sys/types.h>
#include <sstream> // std::stringstream
#include "CTfLiteClass.h"
#include "ClassLogFile.h"
static const char* TAG = "flow_analog";
bool debugdetailgeneral = false;
ClassFlowCNNGeneral::ClassFlowCNNGeneral(ClassFlowAlignment *_flowalign, t_CNNType _cnntype) : ClassFlowImage(NULL, TAG)
{
string cnnmodelfile = "";
modelxsize = 1;
modelysize = 1;
ListFlowControll = NULL;
previousElement = NULL;
SaveAllFiles = false;
disabled = false;
extendedResolution = false;
isLogImageSelect = false;
CNNType = AutoDetect;
CNNType = _cnntype;
flowpostalignment = _flowalign;
}
int ClassFlowCNNGeneral::AnzahlROIs(int _analog = 0)
{
int zw = GENERAL[_analog]->ROI.size();
if (extendedResolution && (CNNType != Digital)) zw++; // because the last digit includes a decimal place, unless there is none (Digital)
return zw;
}
string ClassFlowCNNGeneral::getReadout(int _analog = 0)
{
string result = "";
if (GENERAL[_analog]->ROI.size() == 0)
return result;
if (CNNType == Analogue)
{
float zahl = GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result;
int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
int prev = -1;
prev = ZeigerEval(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result, prev);
result = std::to_string(prev);
if (extendedResolution && (CNNType != Digital))
result = result + std::to_string(ergebnis_nachkomma);
for (int i = GENERAL[_analog]->ROI.size() - 2; i >= 0; --i)
{
prev = ZeigerEval(GENERAL[_analog]->ROI[i]->result, prev);
result = std::to_string(prev) + result;
}
}
if (CNNType == Digital)
{
for (int i = 0; i < GENERAL[_analog]->ROI.size(); ++i)
{
if (GENERAL[_analog]->ROI[i]->resultklasse == 10)
result = result + "N";
else
result = result + std::to_string(GENERAL[_analog]->ROI[i]->resultklasse);
}
}
if (CNNType == DigitalHyprid)
{
// int ergebnis_nachkomma = -1;
int zif_akt = -1;
float zahl = GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result;
if (zahl >= 0) // NaN?
{
if (extendedResolution)
{
int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
int ergebnis_vorkomma = ((int) floor(zahl)) % 10;
result = std::to_string(ergebnis_vorkomma) + std::to_string(ergebnis_nachkomma);
}
else
{
zif_akt = ZeigerEvalHybrid(GENERAL[_analog]->ROI[GENERAL[_analog]->ROI.size() - 1]->result, -1, -1);
result = std::to_string(zif_akt);
}
}
else
{
result = "N";
if (extendedResolution && (CNNType != Digital))
result = "NN";
}
for (int i = GENERAL[_analog]->ROI.size() - 2; i >= 0; --i)
{
if (GENERAL[_analog]->ROI[i]->result >= 0)
{
zif_akt = ZeigerEvalHybrid(GENERAL[_analog]->ROI[i]->result, GENERAL[_analog]->ROI[i+1]->result, zif_akt);
result = std::to_string(zif_akt) + result;
}
else
{
zif_akt = -1;
result = "N" + result;
}
}
}
return result;
}
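// Worked example for the Analogue branch above (values are illustrative): with two pointer
// ROIs reading 3.9 and 8.4 (config order, most significant first), the last ROI is evaluated
// first: ZeigerEval(8.4, -1) = 8. The higher-order pointer then consults that digit:
// ZeigerEval(3.9, 8) = 3 (not rounded up, because the lower pointer has not passed 0 yet),
// so getReadout() returns "38"; with extendedResolution it returns "384", the extra 4 being
// the tenths digit of the last pointer.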
int ClassFlowCNNGeneral::ZeigerEvalHybrid(float zahl, float zahl_vorgaenger, int eval_vorgaenger)
{
int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
// int ergebnis_vorkomma = ((int) floor(zahl)) % 10;
if (zahl_vorgaenger < 0) // no preceding (lower-order) digit available --> round the number
{
if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8)) // band around the digit --> round, as the digit is reached within the tolerance
return (int) round(zahl);
else
return (int) trunc(zahl);
}
if (zahl_vorgaenger > 9.2) // digit transition is starting
{
if (eval_vorgaenger == 0) // the transition has already happened
{
return (int) round(zahl); // assume the new number is already close to its target
}
else
{
if (zahl_vorgaenger <= 9.5) // the transition is only just starting
{
if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8)) // band around the digit --> round, as the digit is reached within the tolerance
return (int) round(zahl);
else
return (int) trunc(zahl);
}
else
{
return (int) trunc(zahl); // transition already further advanced, i.e. tenths digit above 2
}
}
}
if ((ergebnis_nachkomma <= 2) || (ergebnis_nachkomma >= 8)) // band around the digit --> round, as the digit is reached within the tolerance
return (int) round(zahl);
return (int) trunc(zahl);
}
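// Worked examples (illustrative): ZeigerEvalHybrid(6.9, -1, -1) = 7 (no lower digit, the tenths
// value 9 lies in the rounding band), ZeigerEvalHybrid(6.4, -1, -1) = 6 (tenths 4 is outside the
// band, so truncate). ZeigerEvalHybrid(3.8, 9.7, 9) = 3, i.e. the digit is held back while the
// lower digit is still rolling over; once the lower digit has flipped, e.g.
// ZeigerEvalHybrid(3.8, 0.1, 0), the band check applies again and the result is round(3.8) = 4.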
int ClassFlowCNNGeneral::ZeigerEval(float zahl, int ziffer_vorgaenger)
{
int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;
int ergebnis_vorkomma = ((int) floor(zahl)) % 10;
int ergebnis, ergebnis_rating;
if (ziffer_vorgaenger == -1)
return ergebnis_vorkomma % 10;
ergebnis_rating = ergebnis_nachkomma - ziffer_vorgaenger;
if (ergebnis_nachkomma >= 5)
ergebnis_rating-=5;
else
ergebnis_rating+=5;
ergebnis = (int) round(zahl);
if (ergebnis_rating < 0)
ergebnis-=1;
if (ergebnis == -1)
ergebnis+=10;
ergebnis = ergebnis % 10;
return ergebnis;
}
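// Worked examples (illustrative): ZeigerEval(3.9, -1) = 3 (no lower-order pointer to consult,
// plain truncation). ZeigerEval(3.9, 8) = 3, because the lower-order pointer still shows 8 and
// has not passed 0 yet. ZeigerEval(3.9, 2) = 4, because the lower-order pointer is already past
// 0, so 3.9 is read as an almost completed 4.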
bool ClassFlowCNNGeneral::ReadParameter(FILE* pfile, string& aktparamgraph)
{
std::vector<string> zerlegt;
aktparamgraph = trim(aktparamgraph);
if (aktparamgraph.size() == 0)
if (!this->GetNextParagraph(pfile, aktparamgraph))
return false;
if ((toUpper(aktparamgraph) != "[ANALOG]") && (toUpper(aktparamgraph) != ";[ANALOG]")
&& (toUpper(aktparamgraph) != "[DIGIT]") && (toUpper(aktparamgraph) != ";[DIGIT]")
&& (toUpper(aktparamgraph) != "[DIGITS]") && (toUpper(aktparamgraph) != ";[DIGITS]")
) // paragraph does not match
return false;
/*
if ((aktparamgraph.compare("[Analog]") != 0) && (aktparamgraph.compare(";[Analog]") != 0)
&& (aktparamgraph.compare("[Digit]") != 0) && (aktparamgraph.compare(";[Digit]"))) // Paragraph passt nicht
return false;
*/
if (aktparamgraph[0] == ';')
{
disabled = true;
while (getNextLine(pfile, &aktparamgraph) && !isNewParagraph(aktparamgraph));
printf("[Analog/Digit] is disabled !!!\n");
return true;
}
while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
{
zerlegt = this->ZerlegeZeile(aktparamgraph);
if ((zerlegt[0] == "LogImageLocation") && (zerlegt.size() > 1))
{
this->LogImageLocation = "/sdcard" + zerlegt[1];
this->isLogImage = true;
}
if ((zerlegt[0] == "LogImageSelect") && (zerlegt.size() > 1))
{
LogImageSelect = zerlegt[1];
isLogImageSelect = true;
}
if ((toUpper(zerlegt[0]) == "LOGFILERETENTIONINDAYS") && (zerlegt.size() > 1))
{
this->logfileRetentionInDays = std::stoi(zerlegt[1]);
}
if ((toUpper(zerlegt[0]) == "MODELTYPE") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "DIGITHYPRID")
CNNType = DigitalHyprid;
}
if ((zerlegt[0] == "Model") && (zerlegt.size() > 1))
{
this->cnnmodelfile = zerlegt[1];
}
if ((zerlegt[0] == "ModelInputSize") && (zerlegt.size() > 2))
{
this->modelxsize = std::stoi(zerlegt[1]);
this->modelysize = std::stoi(zerlegt[2]);
}
if (zerlegt.size() >= 5)
{
general* _analog = GetGENERAL(zerlegt[0], true);
roi* neuroi = _analog->ROI[_analog->ROI.size()-1];
neuroi->posx = std::stoi(zerlegt[1]);
neuroi->posy = std::stoi(zerlegt[2]);
neuroi->deltax = std::stoi(zerlegt[3]);
neuroi->deltay = std::stoi(zerlegt[4]);
neuroi->result = -1;
neuroi->image = NULL;
neuroi->image_org = NULL;
}
if ((toUpper(zerlegt[0]) == "SAVEALLFILES") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
SaveAllFiles = true;
}
if ((toUpper(zerlegt[0]) == "EXTENDEDRESOLUTION") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
extendedResolution = true;
}
}
for (int _ana = 0; _ana < GENERAL.size(); ++_ana)
for (int i = 0; i < GENERAL[_ana]->ROI.size(); ++i)
{
GENERAL[_ana]->ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
GENERAL[_ana]->ROI[i]->image_org = new CImageBasis(GENERAL[_ana]->ROI[i]->deltax, GENERAL[_ana]->ROI[i]->deltay, 3);
}
return true;
}
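// Illustrative [Analog]/[Digits] paragraph in the form ReadParameter() expects. All paths,
// names and coordinates below are invented for this example; see the project's config.ini
// for real values:
//   [Analog]
//   Model = /config/ana-cont.tflite
//   ModelInputSize = 32 32
//   main.ana1 378 127 92 92        <- "<number>.<roi>" followed by posx posy deltax deltay
//   main.ana2 215 298 92 92
//   ExtendedResolution = true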
general* ClassFlowCNNGeneral::FindGENERAL(string _name_number)
{
for (int i = 0; i < GENERAL.size(); ++i)
if (GENERAL[i]->name == _name_number)
return GENERAL[i];
return NULL;
}
general* ClassFlowCNNGeneral::GetGENERAL(string _name, bool _create = true)
{
string _analog, _roi;
int _pospunkt = _name.find_first_of(".");
if (_pospunkt > -1)
{
_analog = _name.substr(0, _pospunkt);
_roi = _name.substr(_pospunkt+1, _name.length() - _pospunkt - 1);
}
else
{
_analog = "default";
_roi = _name;
}
general *_ret = NULL;
for (int i = 0; i < GENERAL.size(); ++i)
if (GENERAL[i]->name == _analog)
_ret = GENERAL[i];
if (!_create) // not found and should not be created either
return _ret;
if (_ret == NULL)
{
_ret = new general;
_ret->name = _analog;
GENERAL.push_back(_ret);
}
roi* neuroi = new roi;
neuroi->name = _roi;
_ret->ROI.push_back(neuroi);
printf("GetGENERAL - GENERAL %s - roi %s\n", _analog.c_str(), _roi.c_str());
return _ret;
}
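// Example (illustrative): GetGENERAL("main.ana1", true) returns (and if needed creates) the
// "general" entry named "main" and appends a new ROI named "ana1" to it, while
// GetGENERAL("ana1", true) files the ROI under the implicit number "default".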
string ClassFlowCNNGeneral::getHTMLSingleStep(string host)
{
string result, zw;
std::vector<HTMLInfo*> htmlinfo;
result = "<p>Found ROIs: </p> <p><img src=\"" + host + "/img_tmp/alg_roi.jpg\"></p>\n";
result = result + "Analog Pointers: <p> ";
htmlinfo = GetHTMLInfo();
for (int i = 0; i < htmlinfo.size(); ++i)
{
std::stringstream stream;
stream << std::fixed << std::setprecision(1) << htmlinfo[i]->val;
zw = stream.str();
result = result + "<img src=\"" + host + "/img_tmp/" + htmlinfo[i]->filename + "\"> " + zw;
delete htmlinfo[i];
}
htmlinfo.clear();
return result;
}
bool ClassFlowCNNGeneral::doFlow(string time)
{
if (disabled)
return true;
if (!doAlignAndCut(time)){
return false;
};
if (debugdetailgeneral) LogFile.WriteToFile("ClassFlowCNNGeneral::doFlow nach Alignment");
doNeuralNetwork(time);
RemoveOldLogs();
return true;
}
bool ClassFlowCNNGeneral::doAlignAndCut(string time)
{
if (disabled)
return true;
CAlignAndCutImage *caic = flowpostalignment->GetAlignAndCutImage();
for (int _ana = 0; _ana < GENERAL.size(); ++_ana)
for (int i = 0; i < GENERAL[_ana]->ROI.size(); ++i)
{
printf("General %d - Align&Cut\n", i);
caic->CutAndSave(GENERAL[_ana]->ROI[i]->posx, GENERAL[_ana]->ROI[i]->posy, GENERAL[_ana]->ROI[i]->deltax, GENERAL[_ana]->ROI[i]->deltay, GENERAL[_ana]->ROI[i]->image_org);
if (SaveAllFiles)
{
if (GENERAL[_ana]->name == "default")
GENERAL[_ana]->ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + GENERAL[_ana]->ROI[i]->name + ".jpg"));
else
GENERAL[_ana]->ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + GENERAL[_ana]->name + "_" + GENERAL[_ana]->ROI[i]->name + ".jpg"));
}
GENERAL[_ana]->ROI[i]->image_org->Resize(modelxsize, modelysize, GENERAL[_ana]->ROI[i]->image);
if (SaveAllFiles)
{
if (GENERAL[_ana]->name == "default")
GENERAL[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + GENERAL[_ana]->ROI[i]->name + ".bmp"));
else
GENERAL[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + GENERAL[_ana]->name + "_" + GENERAL[_ana]->ROI[i]->name + ".bmp"));
}
}
return true;
}
void ClassFlowCNNGeneral::DrawROI(CImageBasis *_zw)
{
if (CNNType == Analogue)
{
int r = 0;
int g = 255;
int b = 0;
for (int _ana = 0; _ana < GENERAL.size(); ++_ana)
for (int i = 0; i < GENERAL[_ana]->ROI.size(); ++i)
{
_zw->drawRect(GENERAL[_ana]->ROI[i]->posx, GENERAL[_ana]->ROI[i]->posy, GENERAL[_ana]->ROI[i]->deltax, GENERAL[_ana]->ROI[i]->deltay, r, g, b, 1);
_zw->drawCircle((int) (GENERAL[_ana]->ROI[i]->posx + GENERAL[_ana]->ROI[i]->deltax/2), (int) (GENERAL[_ana]->ROI[i]->posy + GENERAL[_ana]->ROI[i]->deltay/2), (int) (GENERAL[_ana]->ROI[i]->deltax/2), r, g, b, 2);
_zw->drawLine((int) (GENERAL[_ana]->ROI[i]->posx + GENERAL[_ana]->ROI[i]->deltax/2), (int) GENERAL[_ana]->ROI[i]->posy, (int) (GENERAL[_ana]->ROI[i]->posx + GENERAL[_ana]->ROI[i]->deltax/2), (int) (GENERAL[_ana]->ROI[i]->posy + GENERAL[_ana]->ROI[i]->deltay), r, g, b, 2);
_zw->drawLine((int) GENERAL[_ana]->ROI[i]->posx, (int) (GENERAL[_ana]->ROI[i]->posy + GENERAL[_ana]->ROI[i]->deltay/2), (int) GENERAL[_ana]->ROI[i]->posx + GENERAL[_ana]->ROI[i]->deltax, (int) (GENERAL[_ana]->ROI[i]->posy + GENERAL[_ana]->ROI[i]->deltay/2), r, g, b, 2);
}
}
else
{
for (int _dig = 0; _dig < GENERAL.size(); ++_dig)
for (int i = 0; i < GENERAL[_dig]->ROI.size(); ++i)
_zw->drawRect(GENERAL[_dig]->ROI[i]->posx, GENERAL[_dig]->ROI[i]->posy, GENERAL[_dig]->ROI[i]->deltax, GENERAL[_dig]->ROI[i]->deltay, 0, 0, (255 - _dig*100), 2);
}
}
bool ClassFlowCNNGeneral::doNeuralNetwork(string time)
{
if (disabled)
return true;
string logPath = CreateLogFolder(time);
CTfLiteClass *tflite = new CTfLiteClass;
string zwcnn = "/sdcard" + cnnmodelfile;
zwcnn = FormatFileName(zwcnn);
printf("%s\n", zwcnn.c_str()); // avoid passing a non-literal string as printf format
if (!tflite->LoadModel(zwcnn)) {
printf("Can't read model file /sdcard%s\n", cnnmodelfile.c_str());
LogFile.WriteToFile("Cannot load model");
delete tflite;
return false;
}
tflite->MakeAllocate();
if (CNNType == AutoDetect)
{
int _anzoutputdimensions = tflite->GetAnzOutPut();
switch (_anzoutputdimensions)
{
case 2:
CNNType = Analogue;
printf("TFlite-Type set to Analogue\n");
break;
case 11:
CNNType = Digital;
printf("TFlite-Type set to Digital\n");
break;
case 22:
CNNType = DigitalHyprid;
printf("TFlite-Type set to DigitalHyprid\n");
break;
default:
printf("ERROR ERROR ERROR - tflite passt nicht zur Firmware - ERROR ERROR ERROR\n");
}
}
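// AutoDetect above maps the size of the model output to the CNN type: 2 outputs mean sin/cos
// pointer regression (Analogue), 11 outputs mean digit classes 0-9 plus a NaN class (Digital),
// and 22 outputs mean two blocks of 11 (digit plus sub-digit position, handled as DigitalHyprid
// below).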
for (int _ana = 0; _ana < GENERAL.size(); ++_ana)
{
for (int i = 0; i < GENERAL[_ana]->ROI.size(); ++i)
{
printf("General %d - TfLite\n", i);
switch (CNNType) {
case Analogue:
{
float f1, f2;
f1 = 0; f2 = 0;
tflite->LoadInputImageBasis(GENERAL[_ana]->ROI[i]->image);
tflite->Invoke();
if (debugdetailgeneral) LogFile.WriteToFile("Nach Invoke");
f1 = tflite->GetOutputValue(0);
f2 = tflite->GetOutputValue(1);
float result = fmod(atan2(f1, f2) / (M_PI * 2) + 2, 1);
GENERAL[_ana]->ROI[i]->result = result * 10;
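// The two outputs are treated as sin and cos of the pointer angle: atan2(f1, f2) / (2*pi) is
// the turn fraction, "+ 2" and "fmod ... 1" map it into [0, 1), and "* 10" scales it to a
// pointer reading between 0 and 10. Illustrative example: f1 = 1, f2 = 0 (a quarter turn,
// assuming the model encodes the angle this way) gives 0.25 and therefore a reading of 2.5.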
printf("Result General(Analog)%i: %f\n", i, GENERAL[_ana]->ROI[i]->result);
if (isLogImage)
LogImage(logPath, GENERAL[_ana]->ROI[i]->name, &GENERAL[_ana]->ROI[i]->result, NULL, time, GENERAL[_ana]->ROI[i]->image_org);
} break;
case Digital:
{
GENERAL[_ana]->ROI[i]->resultklasse = 0;
GENERAL[_ana]->ROI[i]->resultklasse = tflite->GetClassFromImageBasis(GENERAL[_ana]->ROI[i]->image);
printf("Result General(Digit)%i: %d\n", i, GENERAL[_ana]->ROI[i]->resultklasse);
if (isLogImage)
{
if (isLogImageSelect)
{
if (LogImageSelect.find(GENERAL[_ana]->ROI[i]->name) != std::string::npos)
LogImage(logPath, GENERAL[_ana]->ROI[i]->name, NULL, &GENERAL[_ana]->ROI[i]->resultklasse, time, GENERAL[_ana]->ROI[i]->image_org);
}
else
{
LogImage(logPath, GENERAL[_ana]->ROI[i]->name, NULL, &GENERAL[_ana]->ROI[i]->resultklasse, time, GENERAL[_ana]->ROI[i]->image_org);
}
}
} break;
case DigitalHyprid:
{
int _num, _nachkomma;
tflite->LoadInputImageBasis(GENERAL[_ana]->ROI[i]->image);
tflite->Invoke();
if (debugdetailgeneral) LogFile.WriteToFile("Nach Invoke");
_num = tflite->GetOutClassification(0, 10);
_nachkomma = tflite->GetOutClassification(11, 22);
string _zwres = "Nach Invoke - Nummer: " + to_string(_num) + " Nachkomma: " + to_string(_nachkomma);
if (debugdetailgeneral) LogFile.WriteToFile(_zwres);
if ((_num == 10) || (_nachkomma == 10)) // NaN detektiert
GENERAL[_ana]->ROI[i]->result = -1;
else
GENERAL[_ana]->ROI[i]->result = fmod((double) _num + (((double)_nachkomma)-5)/10 + (double) 10, 10);
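// Illustrative combinations: _num = 7 with _nachkomma = 8 gives 7 + (8-5)/10 = 7.3;
// _num = 0 with _nachkomma = 2 gives 0 - 0.3, wrapped by "+ 10, fmod 10" to 9.7. The sub-digit
// class therefore shifts the reading by roughly half a digit in either direction around the
// recognized digit.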
printf("Result General(DigitalHyprid)%i: %f\n", i, GENERAL[_ana]->ROI[i]->result);
_zwres = "Result General(DigitalHyprid)" + to_string(i) + ": " + to_string(GENERAL[_ana]->ROI[i]->result);
if (debugdetailgeneral) LogFile.WriteToFile(_zwres);
if (isLogImage)
LogImage(logPath, GENERAL[_ana]->ROI[i]->name, &GENERAL[_ana]->ROI[i]->result, NULL, time, GENERAL[_ana]->ROI[i]->image_org);
} break;
default:
break;
}
}
}
delete tflite;
return true;
}
bool ClassFlowCNNGeneral::isExtendedResolution(int _number)
{
if (extendedResolution && !(CNNType == Digital))
return true;
return false;
}
std::vector<HTMLInfo*> ClassFlowCNNGeneral::GetHTMLInfo()
{
std::vector<HTMLInfo*> result;
for (int _ana = 0; _ana < GENERAL.size(); ++_ana)
for (int i = 0; i < GENERAL[_ana]->ROI.size(); ++i)
{
if (GENERAL[_ana]->name == "default")
GENERAL[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + GENERAL[_ana]->ROI[i]->name + ".bmp"));
else
GENERAL[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + GENERAL[_ana]->name + "_" + GENERAL[_ana]->ROI[i]->name + ".bmp"));
HTMLInfo *zw = new HTMLInfo;
if (GENERAL[_ana]->name == "default")
{
zw->filename = GENERAL[_ana]->ROI[i]->name + ".bmp";
zw->filename_org = GENERAL[_ana]->ROI[i]->name + ".jpg";
}
else
{
zw->filename = GENERAL[_ana]->name + "_" + GENERAL[_ana]->ROI[i]->name + ".bmp";
zw->filename_org = GENERAL[_ana]->name + "_" + GENERAL[_ana]->ROI[i]->name + ".jpg";
}
zw->val = GENERAL[_ana]->ROI[i]->result;
zw->image = GENERAL[_ana]->ROI[i]->image;
zw->image_org = GENERAL[_ana]->ROI[i]->image_org;
result.push_back(zw);
}
return result;
}
int ClassFlowCNNGeneral::getAnzahlGENERAL()
{
return GENERAL.size();
}
string ClassFlowCNNGeneral::getNameGENERAL(int _analog)
{
if (_analog < GENERAL.size())
return GENERAL[_analog]->name;
return "GENERAL DOES NOT EXIST";
}
general* ClassFlowCNNGeneral::GetGENERAL(int _analog)
{
if (_analog < GENERAL.size())
return GENERAL[_analog];
return NULL;
}
void ClassFlowCNNGeneral::UpdateNameNumbers(std::vector<std::string> *_name_numbers)
{
for (int _dig = 0; _dig < GENERAL.size(); _dig++)
{
std::string _name = GENERAL[_dig]->name;
bool found = false;
for (int i = 0; i < (*_name_numbers).size(); ++i)
{
if ((*_name_numbers)[i] == _name)
found = true;
}
if (!found)
(*_name_numbers).push_back(_name);
}
}


@@ -0,0 +1,77 @@
#pragma once
#include "ClassFlowImage.h"
#include "ClassFlowAlignment.h"
enum t_CNNType {
AutoDetect,
Analogue,
Digital,
DigitalHyprid,
None
};
struct roi {
int posx, posy, deltax, deltay;
float result;
int resultklasse;
string name;
CImageBasis *image, *image_org;
};
struct general {
string name;
std::vector<roi*> ROI;
};
class ClassFlowCNNGeneral :
public ClassFlowImage
{
protected:
t_CNNType CNNType;
std::vector<general*> GENERAL;
string cnnmodelfile;
int modelxsize, modelysize;
bool isLogImageSelect;
string LogImageSelect;
ClassFlowAlignment* flowpostalignment;
bool SaveAllFiles;
bool extendedResolution;
int ZeigerEval(float zahl, int ziffer_vorgaenger);
int ZeigerEvalHybrid(float zahl, float zahl_vorgaenger, int eval_vorgaenger);
bool doNeuralNetwork(string time);
bool doAlignAndCut(string time);
public:
ClassFlowCNNGeneral(ClassFlowAlignment *_flowalign, t_CNNType _cnntype = AutoDetect);
bool ReadParameter(FILE* pfile, string& aktparamgraph);
bool doFlow(string time);
string getHTMLSingleStep(string host);
string getReadout(int _analog);
void DrawROI(CImageBasis *_zw);
std::vector<HTMLInfo*> GetHTMLInfo();
int AnzahlROIs(int _analog);
int getAnzahlGENERAL();
general* GetGENERAL(int _analog);
general* GetGENERAL(string _name, bool _create);
general* FindGENERAL(string _name_number);
string getNameGENERAL(int _analog);
bool isExtendedResolution(int _number = 0);
void UpdateNameNumbers(std::vector<std::string> *_name_numbers);
t_CNNType getCNNType(){return CNNType;};
string name(){return "ClassFlowCNNGeneral";};
};
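// Minimal usage sketch (not part of this diff; flowalignment, pfile, paragraph and timestamp
// are placeholder variables, the wiring mirrors ClassFlowControll further below):
//   ClassFlowCNNGeneral* digits = new ClassFlowCNNGeneral(flowalignment, Digital);
//   digits->ReadParameter(pfile, paragraph);    // consumes a "[Digits]" style paragraph
//   digits->doFlow(timestamp);                  // align/cut the ROIs and run the CNN
//   std::string value = digits->getReadout(0);  // readout of the first configured number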


@@ -1,6 +1,7 @@
#include "ClassFlowControll.h"
#include "connect_wlan.h"
#include "read_wlanini.h"
#include "freertos/task.h"
@@ -11,6 +12,9 @@
#include "Helper.h"
#include "server_ota.h"
//#include "CImg.h"
#include "server_help.h"
//#define DEBUG_DETAIL_ON
@@ -21,17 +25,20 @@ static const char* TAG = "flow_controll";
std::string ClassFlowControll::doSingleStep(std::string _stepname, std::string _host){
std::string _classname = "";
std::string result = "";
// printf("_stepname: %s\n", _stepname.c_str());
if ((_stepname.compare("[MakeImage]") == 0) || (_stepname.compare(";[MakeImage]") == 0)){
_classname = "ClassFlowMakeImage";
}
if ((_stepname.compare("[Alignment]") == 0) || (_stepname.compare(";[Alignment]") == 0)){
_classname = "ClassFlowAlignment";
}
if ((_stepname.compare("[Digits]") == 0) || (_stepname.compare(";[Digits]") == 0)){
_classname = "ClassFlowDigit";
if ((_stepname.compare(0, 7, "[Digits") == 0) || (_stepname.compare(0, 8, ";[Digits") == 0)) {
// if ((_stepname.compare("[Digits]") == 0) || (_stepname.compare(";[Digits]") == 0)){
// printf("Digits!!!\n");
_classname = "ClassFlowCNNGeneral";
}
if ((_stepname.compare("[Analog]") == 0) || (_stepname.compare(";[Analog]") == 0)){
_classname = "ClassFlowAnalog";
_classname = "ClassFlowCNNGeneral";
}
if ((_stepname.compare("[MQTT]") == 0) || (_stepname.compare(";[MQTT]") == 0)){
_classname = "ClassFlowMQTT";
@@ -47,11 +54,36 @@ std::string ClassFlowControll::doSingleStep(std::string _stepname, std::string _
return result;
}
std::string ClassFlowControll::TranslateAktstatus(std::string _input)
{
if (_input.compare("ClassFlowMakeImage") == 0)
return ("Take Image");
if (_input.compare("ClassFlowAlignment") == 0)
return ("Aligning");
//if (_input.compare("ClassFlowAnalog") == 0)
// return ("Analog ROIs");
if (_input.compare("ClassFlowCNNGeneral") == 0)
return ("Digitalization of ROIs");
if (_input.compare("ClassFlowMQTT") == 0)
return ("Sending MQTT");
if (_input.compare("ClassFlowPostProcessing") == 0)
return ("Processing");
return "Unkown Status";
}
std::vector<HTMLInfo*> ClassFlowControll::GetAllDigital()
{
/*
for (int i = 0; i < FlowControll.size(); ++i)
if (FlowControll[i]->name().compare("ClassFlowDigit") == 0)
return ((ClassFlowDigit*) (FlowControll[i]))->GetHTMLInfo();
if (FlowControll[i]->name().compare("ClassFlowCNNGeneral") == 0)
return ((ClassFlowCNNGeneral*) (FlowControll[i]))->GetHTMLInfo();
*/
if (flowdigit)
return flowdigit->GetHTMLInfo();
std::vector<HTMLInfo*> empty;
return empty;
@@ -59,14 +91,52 @@ std::vector<HTMLInfo*> ClassFlowControll::GetAllDigital()
std::vector<HTMLInfo*> ClassFlowControll::GetAllAnalog()
{
/*
for (int i = 0; i < FlowControll.size(); ++i)
if (FlowControll[i]->name().compare("ClassFlowAnalog") == 0)
return ((ClassFlowAnalog*) (FlowControll[i]))->GetHTMLInfo();
std::vector<HTMLInfo*> empty;
return empty;
*/
if (flowanalog)
return flowanalog->GetHTMLInfo();
std::vector<HTMLInfo*> empty;
return empty;
}
t_CNNType ClassFlowControll::GetTypeDigital()
{
if (flowdigit)
return flowdigit->getCNNType();
return t_CNNType::None;
}
t_CNNType ClassFlowControll::GetTypeAnalog()
{
if (flowanalog)
return flowanalog->getCNNType();
return t_CNNType::None;
}
string ClassFlowControll::GetMQTTMainTopic()
{
for (int i = 0; i < FlowControll.size(); ++i)
if (FlowControll[i]->name().compare("ClassFlowMQTT") == 0)
return ((ClassFlowMQTT*) (FlowControll[i]))->GetMQTTMainTopic();
return "";
}
void ClassFlowControll::SetInitialParameter(void)
{
@@ -78,7 +148,7 @@ void ClassFlowControll::SetInitialParameter(void)
flowpostprocessing = NULL;
disabled = false;
aktRunNr = 0;
aktstatus = "Startup";
aktstatus = "Booting ...";
}
@@ -106,19 +176,20 @@ ClassFlow* ClassFlowControll::CreateClassFlow(std::string _type)
}
if (toUpper(_type).compare("[ANALOG]") == 0)
{
cfc = new ClassFlowAnalog(&FlowControll);
flowanalog = (ClassFlowAnalog*) cfc;
cfc = new ClassFlowCNNGeneral(flowalignment);
flowanalog = (ClassFlowCNNGeneral*) cfc;
}
if (toUpper(_type).compare("[DIGITS]") == 0)
if (toUpper(_type).compare(0, 7, "[DIGITS") == 0)
{
cfc = new ClassFlowDigit(&FlowControll);
flowdigit = (ClassFlowDigit*) cfc;
cfc = new ClassFlowCNNGeneral(flowalignment);
flowdigit = (ClassFlowCNNGeneral*) cfc;
}
if (toUpper(_type).compare("[MQTT]") == 0)
cfc = new ClassFlowMQTT(&FlowControll);
if (toUpper(_type).compare("[POSTPROCESSING]") == 0)
{
cfc = new ClassFlowPostProcessing(&FlowControll);
cfc = new ClassFlowPostProcessing(&FlowControll, flowanalog, flowdigit);
flowpostprocessing = (ClassFlowPostProcessing*) cfc;
}
@@ -163,14 +234,17 @@ void ClassFlowControll::InitFlow(std::string config)
cfc = CreateClassFlow(line);
if (cfc)
{
printf("Start ReadParameter\n");
printf("Start ReadParameter (%s)\n", line.c_str());
cfc->ReadParameter(pFile, line);
}
else
{
fgets(zw, 1024, pFile);
printf("%s", zw);
line = std::string(zw);
line = "";
if (fgets(zw, 1024, pFile) && !feof(pFile))
{
printf("Read: %s", zw);
line = std::string(zw);
}
}
}
@@ -188,9 +262,9 @@ void ClassFlowControll::doFlowMakeImageOnly(string time){
for (int i = 0; i < FlowControll.size(); ++i)
{
if (FlowControll[i]->name() == "ClassFlowMakeImage") {
zw_time = gettimestring("%Y%m%d-%H%M%S");
aktstatus = zw_time + ": " + FlowControll[i]->name();
string zw = "FlowControll.doFlowMakeImageOnly - " + FlowControll[i]->name();
// zw_time = gettimestring("%Y%m%d-%H%M%S");
zw_time = gettimestring("%H:%M:%S");
aktstatus = TranslateAktstatus(FlowControll[i]->name()) + " (" + zw_time + ")";
FlowControll[i]->doFlow(time);
}
}
@@ -205,13 +279,16 @@ bool ClassFlowControll::doFlow(string time)
int repeat = 0;
#ifdef DEBUG_DETAIL_ON
LogFile.WriteHeapInfo("ClassFlowAnalog::doFlow - Start");
LogFile.WriteHeapInfo("ClassFlowControll::doFlow - Start");
#endif
for (int i = 0; i < FlowControll.size(); ++i)
{
zw_time = gettimestring("%Y%m%d-%H%M%S");
aktstatus = zw_time + ": " + FlowControll[i]->name();
zw_time = gettimestring("%H:%M:%S");
aktstatus = TranslateAktstatus(FlowControll[i]->name()) + " (" + zw_time + ")";
// zw_time = gettimestring("%Y%m%d-%H%M%S");
// aktstatus = zw_time + ": " + FlowControll[i]->name();
string zw = "FlowControll.doFlow - " + FlowControll[i]->name();
@@ -234,28 +311,54 @@ bool ClassFlowControll::doFlow(string time)
}
#ifdef DEBUG_DETAIL_ON
LogFile.WriteHeapInfo("ClassFlowAnalog::doFlow");
LogFile.WriteHeapInfo("ClassFlowControll::doFlow");
#endif
}
zw_time = gettimestring("%Y%m%d-%H%M%S");
aktstatus = zw_time + ": Flow is done";
zw_time = gettimestring("%H:%M:%S");
aktstatus = "Flow finished (" + zw_time + ")";
return result;
}
void ClassFlowControll::UpdateAktStatus(std::string _flow)
string ClassFlowControll::getReadoutAll(int _type)
{
aktstatus = gettimestring("%Y%m%d-%H%M%S");
aktstatus = aktstatus + "\t" + std::to_string(aktRunNr) + "\t";
if (_flow == "ClassFlowMakeImage")
aktstatus = aktstatus + "Taking Raw Image";
else
if (_flow == "ClassFlowAlignment")
aktstatus = aktstatus + "Aligning Image";
std::vector<NumberPost*> numbers = flowpostprocessing->GetNumbers();
std::string out = "";
for (int i = 0; i < numbers.size(); ++i)
{
out = out + numbers[i]->name + "\t";
switch (_type) {
case READOUT_TYPE_VALUE:
out = out + numbers[i]->ReturnValueNoError;
break;
case READOUT_TYPE_PREVALUE:
if (flowpostprocessing->PreValueUse)
{
if (numbers[i]->PreValueOkay)
out = out + numbers[i]->ReturnPreValue;
else
out = out + "PreValue too old";
}
else
out = out + "PreValue deactivated";
break;
case READOUT_TYPE_RAWVALUE:
out = out + numbers[i]->ReturnRawValue;
break;
case READOUT_TYPE_ERROR:
out = out + numbers[i]->ErrorMessageText;
break;
}
if (i < numbers.size()-1)
out = out + "\r\n";
}
}
// printf("OUT: %s", out.c_str());
return out;
}
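// Example output of getReadoutAll(READOUT_TYPE_VALUE) with two configured numbers named "main"
// and "garden" (values are illustrative): "main\t123.456\r\ngarden\t7.8", i.e. one
// "<name>\t<value>" entry per number, joined with CRLF.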
string ClassFlowControll::getReadout(bool _rawvalue = false, bool _noerror = false)
@@ -281,17 +384,17 @@ string ClassFlowControll::getReadout(bool _rawvalue = false, bool _noerror = fal
return result;
}
string ClassFlowControll::GetPrevalue()
string ClassFlowControll::GetPrevalue(std::string _number)
{
if (flowpostprocessing)
{
return flowpostprocessing->GetPreValue();
return flowpostprocessing->GetPreValue(_number);
}
return std::string();
}
std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue)
std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue, std::string _numbers, bool _extern)
{
float zw;
char* p;
@@ -313,7 +416,7 @@ std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue)
if (flowpostprocessing)
{
flowpostprocessing->SavePreValue(zw);
flowpostprocessing->SetPreValue(zw, _numbers, _extern);
return _newvalue;
}
@@ -382,6 +485,9 @@ bool ClassFlowControll::ReadParameter(FILE* pfile, string& aktparamgraph)
{
// reboot required so that the new wlan.ini is actually used !!!
fclose(pfile);
printf("do reboot\n");
esp_restart();
hard_restart();
doReboot();
}
}
@@ -402,6 +508,7 @@ bool ClassFlowControll::ReadParameter(FILE* pfile, string& aktparamgraph)
return true;
}
int ClassFlowControll::CleanTempFolder() {
const char* folderPath = "/sdcard/img_tmp";
@@ -435,7 +542,7 @@ int ClassFlowControll::CleanTempFolder() {
esp_err_t ClassFlowControll::SendRawJPG(httpd_req_t *req)
{
return flowmakeimage->SendRawJPG(req);
return flowmakeimage != NULL ? flowmakeimage->SendRawJPG(req) : ESP_FAIL;
}
@@ -447,6 +554,12 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
esp_err_t result = ESP_FAIL;
bool Dodelete = false;
if (flowalignment == NULL)
{
printf("Can't continue, flowalignment is NULL\n");
return ESP_FAIL;
}
if (_fn == "alg.jpg")
{
_send = flowalignment->ImageBasis;
@@ -460,6 +573,18 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
flowalignment->DrawRef(_imgzw);
if (flowdigit) flowdigit->DrawROI(_imgzw);
if (flowanalog) flowanalog->DrawROI(_imgzw);
/*/////////////////////////////////////
cimg_library::CImg<unsigned char> cimg(_imgzw->rgb_image, _imgzw->bpp, _imgzw->width, _imgzw->height, 1);
//Convert cimg type
// cimg.permute_axes("yzcx");
cimg.draw_text(300, 300, "Dies ist ein Test", "black");
//Convert back to stb type to save
// cimg.permute_axes("cxyz");
*////////////////////////////////////
_send = _imgzw;
Dodelete = true;
}
@@ -478,7 +603,9 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
if (htmlinfo[i]->image_org)
_send = htmlinfo[i]->image_org;
}
delete htmlinfo[i];
}
htmlinfo.clear();
htmlinfo = GetAllAnalog();
for (int i = 0; i < htmlinfo.size(); ++i)
@@ -493,7 +620,9 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
if (htmlinfo[i]->image_org)
_send = htmlinfo[i]->image_org;
}
delete htmlinfo[i];
}
htmlinfo.clear();
if (_send)
{


@@ -5,10 +5,17 @@
#include "ClassFlow.h"
#include "ClassFlowMakeImage.h"
#include "ClassFlowAlignment.h"
#include "ClassFlowDigit.h"
#include "ClassFlowAnalog.h"
//#include "ClassFlowDigit.h"
#include "ClassFlowCNNGeneral.h"
#include "ClassFlowPostProcessing.h"
#include "ClassFlowMQTT.h"
#include "ClassFlowCNNGeneral.h"
#define READOUT_TYPE_VALUE 0
#define READOUT_TYPE_PREVALUE 1
#define READOUT_TYPE_RAWVALUE 2
#define READOUT_TYPE_ERROR 3
class ClassFlowControll :
@@ -18,8 +25,9 @@ protected:
std::vector<ClassFlow*> FlowControll;
ClassFlowPostProcessing* flowpostprocessing;
ClassFlowAlignment* flowalignment;
ClassFlowAnalog* flowanalog;
ClassFlowDigit* flowdigit;
ClassFlowCNNGeneral* flowanalog;
ClassFlowCNNGeneral* flowdigit;
// ClassFlowDigit* flowdigit;
ClassFlowMakeImage* flowmakeimage;
ClassFlow* CreateClassFlow(std::string _type);
@@ -30,18 +38,21 @@ protected:
std::string aktstatus;
int aktRunNr;
void UpdateAktStatus(std::string _flow);
public:
void InitFlow(std::string config);
bool doFlow(string time);
void doFlowMakeImageOnly(string time);
bool getStatusSetupModus(){return SetupModeActive;};
string getReadout(bool _rawvalue, bool _noerror);
string UpdatePrevalue(std::string _newvalue);
string GetPrevalue();
string getReadoutAll(int _type);
string UpdatePrevalue(std::string _newvalue, std::string _numbers, bool _extern);
string GetPrevalue(std::string _number = "");
bool ReadParameter(FILE* pfile, string& aktparamgraph);
string TranslateAktstatus(std::string _input);
string GetMQTTMainTopic();
esp_err_t GetJPGStream(std::string _fn, httpd_req_t *req);
esp_err_t SendRawJPG(httpd_req_t *req);
@@ -54,6 +65,9 @@ public:
std::vector<HTMLInfo*> GetAllDigital();
std::vector<HTMLInfo*> GetAllAnalog();
t_CNNType GetTypeDigital();
t_CNNType GetTypeAnalog();
int CleanTempFolder();
string name(){return "ClassFlowControll";};


@@ -1,275 +0,0 @@
#include "ClassFlowDigit.h"
//#include "CFindTemplate.h"
//#include "CTfLiteClass.h"
// #define OHNETFLITE
#ifndef OHNETFLITE
#include "CTfLiteClass.h"
#endif
// #include "bitmap_image.hpp"
#include "ClassLogFile.h"
static const char* TAG = "flow_digital";
void ClassFlowDigit::SetInitialParameter(void)
{
string cnnmodelfile = "";
modelxsize = 1;
modelysize = 1;
ListFlowControll = NULL;
previousElement = NULL;
SaveAllFiles = false;
disabled = false;
}
ClassFlowDigit::ClassFlowDigit() : ClassFlowImage(TAG)
{
SetInitialParameter();
}
ClassFlowDigit::ClassFlowDigit(std::vector<ClassFlow*>* lfc) : ClassFlowImage(lfc, TAG)
{
SetInitialParameter();
ListFlowControll = lfc;
for (int i = 0; i < ListFlowControll->size(); ++i)
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowAlignment") == 0)
{
flowpostalignment = (ClassFlowAlignment*) (*ListFlowControll)[i];
}
}
}
ClassFlowDigit::ClassFlowDigit(std::vector<ClassFlow*>* lfc, ClassFlow *_prev) : ClassFlowImage(lfc, _prev, TAG)
{
SetInitialParameter();
ListFlowControll = lfc;
previousElement = _prev;
for (int i = 0; i < ListFlowControll->size(); ++i)
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowAlignment") == 0)
{
flowpostalignment = (ClassFlowAlignment*) (*ListFlowControll)[i];
}
}
}
string ClassFlowDigit::getReadout()
{
string rst = "";
for (int i = 0; i < ROI.size(); ++i)
{
if (ROI[i]->resultklasse == 10)
rst = rst + "N";
else
rst = rst + std::to_string(ROI[i]->resultklasse);
}
return rst;
}
bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
{
std::vector<string> zerlegt;
aktparamgraph = trim(aktparamgraph);
if (aktparamgraph.size() == 0)
if (!this->GetNextParagraph(pfile, aktparamgraph))
return false;
if ((aktparamgraph.compare("[Digits]") != 0) && (aktparamgraph.compare(";[Digits]") != 0)) // Paragraph passt nich zu MakeImage
return false;
if (aktparamgraph[0] == ';')
{
disabled = true;
while (getNextLine(pfile, &aktparamgraph) && !isNewParagraph(aktparamgraph));
printf("[Digits] is disabled !!!\n");
return true;
}
while (getNextLine(pfile, &aktparamgraph) && !isNewParagraph(aktparamgraph))
{
zerlegt = this->ZerlegeZeile(aktparamgraph);
if ((zerlegt[0] == "LogImageLocation") && (zerlegt.size() > 1))
{
LogImageLocation = "/sdcard" + zerlegt[1];
isLogImage = true;
}
if ((zerlegt[0] == "Model") && (zerlegt.size() > 1))
{
cnnmodelfile = zerlegt[1];
}
if ((zerlegt[0] == "ModelInputSize") && (zerlegt.size() > 2))
{
modelxsize = std::stoi(zerlegt[1]);
modelysize = std::stoi(zerlegt[2]);
}
if (zerlegt.size() >= 5)
{
roi* neuroi = new roi;
neuroi->name = zerlegt[0];
neuroi->posx = std::stoi(zerlegt[1]);
neuroi->posy = std::stoi(zerlegt[2]);
neuroi->deltax = std::stoi(zerlegt[3]);
neuroi->deltay = std::stoi(zerlegt[4]);
neuroi->resultklasse = -1;
neuroi->image = NULL;
neuroi->image_org = NULL;
ROI.push_back(neuroi);
}
if ((toUpper(zerlegt[0]) == "SAVEALLFILES") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
SaveAllFiles = true;
}
}
for (int i = 0; i < ROI.size(); ++i)
{
ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
ROI[i]->image_org = new CImageBasis(ROI[i]->deltax, ROI[i]->deltay, 3);
}
return true;
}
string ClassFlowDigit::getHTMLSingleStep(string host)
{
string result, zw;
std::vector<HTMLInfo*> htmlinfo;
result = "<p>Found ROIs: </p> <p><img src=\"" + host + "/img_tmp/alg_roi.jpg\"></p>\n";
result = result + "Digital Counter: <p> ";
htmlinfo = GetHTMLInfo();
for (int i = 0; i < htmlinfo.size(); ++i)
{
if (htmlinfo[i]->val == 10)
zw = "NaN";
else
{
zw = to_string((int) htmlinfo[i]->val);
}
result = result + "<img src=\"" + host + "/img_tmp/" + htmlinfo[i]->filename + "\"> " + zw;
delete htmlinfo[i];
}
htmlinfo.clear();
return result;
}
bool ClassFlowDigit::doFlow(string time)
{
if (disabled)
return true;
if (!doAlignAndCut(time)){
return false;
};
doNeuralNetwork(time);
RemoveOldLogs();
return true;
}
bool ClassFlowDigit::doAlignAndCut(string time)
{
if (disabled)
return true;
CAlignAndCutImage *caic = flowpostalignment->GetAlignAndCutImage();
for (int i = 0; i < ROI.size(); ++i)
{
printf("DigitalDigit %d - Align&Cut\n", i);
caic->CutAndSave(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, ROI[i]->image_org);
if (SaveAllFiles) ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".jpg"));
ROI[i]->image_org->Resize(modelxsize, modelysize, ROI[i]->image);
if (SaveAllFiles) ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".bmp"));
}
return true;
}
bool ClassFlowDigit::doNeuralNetwork(string time)
{
if (disabled)
return true;
string logPath = CreateLogFolder(time);
#ifndef OHNETFLITE
CTfLiteClass *tflite = new CTfLiteClass;
string zwcnn = FormatFileName("/sdcard" + cnnmodelfile);
printf("%s\n", zwcnn.c_str()); // avoid passing a non-literal string as printf format
tflite->LoadModel(zwcnn);
tflite->MakeAllocate();
#endif
for (int i = 0; i < ROI.size(); ++i)
{
printf("DigitalDigit %d - TfLite\n", i);
ROI[i]->resultklasse = 0;
#ifndef OHNETFLITE
ROI[i]->resultklasse = tflite->GetClassFromImageBasis(ROI[i]->image);
#endif
printf("Result Digit%i: %d\n", i, ROI[i]->resultklasse);
if (isLogImage)
{
LogImage(logPath, ROI[i]->name, NULL, &ROI[i]->resultklasse, time, ROI[i]->image_org);
}
}
#ifndef OHNETFLITE
delete tflite;
#endif
return true;
}
void ClassFlowDigit::DrawROI(CImageBasis *_zw)
{
for (int i = 0; i < ROI.size(); ++i)
_zw->drawRect(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, 0, 0, 255, 2);
}
std::vector<HTMLInfo*> ClassFlowDigit::GetHTMLInfo()
{
std::vector<HTMLInfo*> result;
for (int i = 0; i < ROI.size(); ++i)
{
HTMLInfo *zw = new HTMLInfo;
zw->filename = ROI[i]->name + ".bmp";
zw->filename_org = ROI[i]->name + ".jpg";
zw->val = ROI[i]->resultklasse;
zw->image = ROI[i]->image;
zw->image_org = ROI[i]->image_org;
result.push_back(zw);
}
return result;
}


@@ -1,46 +0,0 @@
#pragma once
#include "ClassFlowImage.h"
#include "ClassFlowAlignment.h"
#include "Helper.h"
#include <string>
struct roi {
int posx, posy, deltax, deltay;
int resultklasse;
string name;
CImageBasis *image, *image_org;
roi* next;
};
class ClassFlowDigit :
public ClassFlowImage
{
protected:
std::vector<roi*> ROI;
string cnnmodelfile;
int modelxsize, modelysize;
bool SaveAllFiles;
ClassFlowAlignment* flowpostalignment;
bool doNeuralNetwork(string time);
bool doAlignAndCut(string time);
void SetInitialParameter(void);
public:
ClassFlowDigit();
ClassFlowDigit(std::vector<ClassFlow*>* lfc);
ClassFlowDigit(std::vector<ClassFlow*>* lfc, ClassFlow *_prev);
bool ReadParameter(FILE* pfile, string& aktparamgraph);
bool doFlow(string time);
string getHTMLSingleStep(string host);
string getReadout();
std::vector<HTMLInfo*> GetHTMLInfo();
void DrawROI(CImageBasis *_zw);
string name(){return "ClassFlowDigit";};
};


@@ -48,9 +48,14 @@ void ClassFlowImage::LogImage(string logPath, string name, float *resultFloat, i
if (!isLogImage)
return;
char buf[10];
if (resultFloat != NULL) {
sprintf(buf, "%.1f_", *resultFloat);
if (*resultFloat < 0)
sprintf(buf, "N.N_");
else
sprintf(buf, "%.1f_", *resultFloat);
} else if (resultInt != NULL) {
sprintf(buf, "%d_", *resultInt);
} else {


@@ -1,6 +1,8 @@
#include <sstream>
#include "ClassFlowMQTT.h"
#include "Helper.h"
#include "time_sntp.h"
#include "interface_mqtt.h"
#include "ClassFlowPostProcessing.h"
@@ -11,6 +13,13 @@ void ClassFlowMQTT::SetInitialParameter(void)
uri = "";
topic = "";
topicError = "";
topicRate = "";
topicTimeStamp = "";
maintopic = "";
mainerrortopic = "";
topicUptime = "";
topicFreeMem = "";
clientname = "watermeter";
OldValue = "";
flowpostprocessing = NULL;
@@ -19,6 +28,9 @@ void ClassFlowMQTT::SetInitialParameter(void)
previousElement = NULL;
ListFlowControll = NULL;
disabled = false;
MQTTenable = false;
}
@@ -86,40 +98,98 @@ bool ClassFlowMQTT::ReadParameter(FILE* pfile, string& aktparamgraph)
{
this->uri = zerlegt[1];
}
if ((toUpper(zerlegt[0]) == "TOPIC") && (zerlegt.size() > 1))
{
this->topic = zerlegt[1];
}
if ((toUpper(zerlegt[0]) == "TOPICERROR") && (zerlegt.size() > 1))
{
this->topicError = zerlegt[1];
}
if ((toUpper(zerlegt[0]) == "CLIENTID") && (zerlegt.size() > 1))
{
this->clientname = zerlegt[1];
}
if (((toUpper(zerlegt[0]) == "TOPIC") || (toUpper(zerlegt[0]) == "MAINTOPIC")) && (zerlegt.size() > 1))
{
maintopic = zerlegt[1];
}
}
if ((uri.length() > 0) && (topic.length() > 0))
if (!MQTTisConnected() && (uri.length() > 0) && (maintopic.length() > 0))
{
MQTTInit(uri, clientname, user, password, topicError, 60);
mainerrortopic = maintopic + "/connection";
MQTTInit(uri, clientname, user, password, mainerrortopic, 60);
MQTTPublish(mainerrortopic, "connected");
MQTTenable = true;
}
return true;
}
string ClassFlowMQTT::GetMQTTMainTopic()
{
return maintopic;
}
bool ClassFlowMQTT::doFlow(string zwtime)
{
if (!MQTTenable)
return true;
std::string result;
std::string resulterror = "";
std::string resultrate = "";
std::string resulttimestamp = "";
string zw = "";
string namenumber = "";
MQTTPublish(mainerrortopic, "connected");
zw = maintopic + "/" + "uptime";
char uptimeStr[11];
sprintf(uptimeStr, "%ld", (long)getUpTime());
MQTTPublish(zw, uptimeStr);
zw = maintopic + "/" + "freeMem";
char freeheapmem[11];
sprintf(freeheapmem, "%zu", esp_get_free_heap_size());
MQTTPublish(zw, freeheapmem);
if (flowpostprocessing)
{
result = flowpostprocessing->getReadoutParam(false, true);
resulterror = flowpostprocessing->getReadoutError();
std::vector<NumberPost*> NUMBERS = flowpostprocessing->GetNumbers();
for (int i = 0; i < NUMBERS.size(); ++i)
{
result = NUMBERS[i]->ReturnValueNoError;
resulterror = NUMBERS[i]->ErrorMessageText;
resultrate = std::to_string(NUMBERS[i]->FlowRateAct);
resulttimestamp = NUMBERS[i]->timeStamp;
namenumber = NUMBERS[i]->name;
if (namenumber == "default")
namenumber = maintopic + "/";
else
namenumber = maintopic + "/" + namenumber + "/";
zw = namenumber + "value";
MQTTPublish(zw, result);
zw = namenumber + "error";
MQTTPublish(zw, resulterror, 1);
zw = namenumber + "rate";
MQTTPublish(zw, resultrate);
zw = namenumber + "timestamp";
MQTTPublish(zw, resulttimestamp);
std::string json="{\"value\":"+result;
json += ",\"error\":\""+resulterror;
json += "\",\"rate\":"+resultrate;
json += ",\"timestamp\":\""+resulttimestamp+"\"}";
zw = namenumber + "json";
MQTTPublish(zw, json);
}
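// Resulting MQTT topic layout for a maintopic "watermeter" (names and payloads illustrative):
//   watermeter/connection -> "connected", watermeter/uptime, watermeter/freeMem
//   watermeter/<number>/value, .../error, .../rate, .../timestamp and .../json with a payload
//   like {"value":123.456,"error":"...","rate":0.0001,"timestamp":"2021-09-12T07:33:30"}.
//   For the number named "default" these topics sit directly below the maintopic.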
}
else
{
@@ -134,14 +204,9 @@ bool ClassFlowMQTT::doFlow(string zwtime)
result = result + "\t" + zw;
}
}
MQTTPublish(topic, result);
}
MQTTPublish(topic, result);
if (topicError.length() > 0) {
MQTTPublish(topicError, resulterror);
}
OldValue = result;
return true;


@@ -9,10 +9,13 @@ class ClassFlowMQTT :
public ClassFlow
{
protected:
std::string uri, topic, topicError, clientname;
std::string uri, topic, topicError, clientname, topicRate, topicTimeStamp, topicUptime, topicFreeMem;
std::string OldValue;
ClassFlowPostProcessing* flowpostprocessing;
std::string user, password;
std::string user, password;
bool MQTTenable;
std::string maintopic, mainerrortopic;
void SetInitialParameter(void);
public:
@@ -20,6 +23,8 @@ public:
ClassFlowMQTT(std::vector<ClassFlow*>* lfc);
ClassFlowMQTT(std::vector<ClassFlow*>* lfc, ClassFlow *_prev);
string GetMQTTMainTopic();
bool ReadParameter(FILE* pfile, string& aktparamgraph);
bool doFlow(string time);
string name(){return "ClassFlowMQTT";};


@@ -14,12 +14,23 @@ static const char* TAG = "flow_make_image";
esp_err_t ClassFlowMakeImage::camera_capture(){
string nm = namerawimage;
Camera.CaptureToFile(nm);
time(&TimeImageTaken);
localtime(&TimeImageTaken);
return ESP_OK;
}
void ClassFlowMakeImage::takePictureWithFlash(int flashdauer)
{
// in case the image gets flipped, it has to be reset here ////
rawImage->width = image_width;
rawImage->height = image_height;
/////////////////////////////////////////////////////////////////////////////////////
printf("Flashdauer: %d\n", flashdauer);
Camera.CaptureToBasisImage(rawImage, flashdauer);
time(&TimeImageTaken);
localtime(&TimeImageTaken);
if (SaveAllFiles) rawImage->SaveToFile(namerawimage);
}
@@ -82,6 +93,12 @@ bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
if (toUpper(zerlegt[1]) == "TRUE")
SaveAllFiles = true;
}
if ((toUpper(zerlegt[0]) == "WAITBEFORETAKINGPICTURE") && (zerlegt.size() > 1))
{
waitbeforepicture = stoi(zerlegt[1]);
}
if ((toUpper(zerlegt[0]) == "BRIGHTNESS") && (zerlegt.size() > 1))
{
@@ -114,9 +131,9 @@ bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
rawImage->CreateEmptyImage(image_width, image_height, 3);
waitbeforepicture_store = waitbeforepicture;
if (FixedExposure)
if (FixedExposure && (waitbeforepicture > 0))
{
printf("Fixed Exposure enabled!\n");
// printf("Fixed Exposure enabled!\n");
int flashdauer = (int) (waitbeforepicture * 1000);
Camera.EnableAutoExposure(flashdauer);
waitbeforepicture = 0.2;
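// Presumed intent of the block above: run auto exposure once during setup with the full flash
// time (waitbeforepicture seconds converted to flashdauer in ms), lock it, and then shorten the
// per-capture wait to 0.2 s so that later pictures are taken faster.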
@@ -165,6 +182,9 @@ bool ClassFlowMakeImage::doFlow(string zwtime)
esp_err_t ClassFlowMakeImage::SendRawJPG(httpd_req_t *req)
{
int flashdauer = (int) (waitbeforepicture * 1000);
time(&TimeImageTaken);
localtime(&TimeImageTaken);
return Camera.CaptureToHTTP(req, flashdauer);
}
@@ -175,6 +195,9 @@ ImageData* ClassFlowMakeImage::SendRawImage()
ImageData *id;
int flashdauer = (int) (waitbeforepicture * 1000);
Camera.CaptureToBasisImage(zw, flashdauer);
time(&TimeImageTaken);
localtime(&TimeImageTaken);
id = zw->writeToMemoryAsJPG();
delete zw;
return id;


@@ -1,8 +1,6 @@
#include "ClassFlowPostProcessing.h"
#include "Helper.h"
#include "ClassFlowAnalog.h"
#include "ClassFlowDigit.h"
#include "ClassFlowMakeImage.h"
#include "ClassLogFile.h"
@@ -11,153 +9,330 @@
#include <time.h>
string ClassFlowPostProcessing::GetPreValue()
#include "time_sntp.h"
#define PREVALUE_TIME_FORMAT_OUTPUT "%Y-%m-%dT%H:%M:%S"
#define PREVALUE_TIME_FORMAT_INPUT "%d-%d-%dT%d:%d:%d"
string ClassFlowPostProcessing::GetPreValue(std::string _number)
{
std::string result;
bool isAnalog = false;
bool isDigit = false;
int index = -1;
int AnzahlAnalog = 0;
result = RundeOutput(PreValue, -DecimalShift);
if (_number == "")
_number = "default";
for (int i = 0; i < ListFlowControll->size(); ++i)
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
{
isAnalog = true;
AnzahlAnalog = ((ClassFlowAnalog*)(*ListFlowControll)[i])->AnzahlROIs();
}
if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
{
isDigit = true;
}
}
for (int i = 0; i < NUMBERS.size(); ++i)
if (NUMBERS[i]->name == _number)
index = i;
if (isDigit && isAnalog)
result = RundeOutput(PreValue, AnzahlAnalog - DecimalShift);
result = RundeOutput(NUMBERS[index]->PreValue, NUMBERS[index]->Nachkomma);
return result;
}
void ClassFlowPostProcessing::SetPreValue(float zw, string _numbers, bool _extern)
{
printf("SetPrevalue: %f, %s\n", zw, _numbers.c_str());
for (int j = 0; j < NUMBERS.size(); ++j)
{
// printf("Number %d, %s\n", j, NUMBERS[j]->name.c_str());
if (NUMBERS[j]->name == _numbers)
{
NUMBERS[j]->PreValue = zw;
if (_extern)
{
time(&(NUMBERS[j]->lastvalue));
localtime(&(NUMBERS[j]->lastvalue));
}
// printf("Found %d! - set to %f\n", j, NUMBERS[j]->PreValue);
}
}
UpdatePreValueINI = true;
SavePreValue();
}
bool ClassFlowPostProcessing::LoadPreValue(void)
{
std::vector<string> zerlegt;
FILE* pFile;
char zw[1024];
string zwtime, zwvalue;
string zwtime, zwvalue, name;
bool _done = false;
UpdatePreValueINI = false; // conversion to the new format
pFile = fopen(FilePreValue.c_str(), "r");
if (pFile == NULL)
return false;
fgets(zw, 1024, pFile);
printf("%s", zw);
printf("Read Zeile Prevalue.ini: %s", zw);
zwtime = trim(std::string(zw));
fgets(zw, 1024, pFile);
fclose(pFile);
printf("%s", zw);
zwvalue = trim(std::string(zw));
PreValue = stof(zwvalue.c_str());
time_t tStart;
int yy, month, dd, hh, mm, ss;
struct tm whenStart;
sscanf(zwtime.c_str(), "%d-%d-%d_%d-%d-%d", &yy, &month, &dd, &hh, &mm, &ss);
whenStart.tm_year = yy - 1900;
whenStart.tm_mon = month - 1;
whenStart.tm_mday = dd;
whenStart.tm_hour = hh;
whenStart.tm_min = mm;
whenStart.tm_sec = ss;
whenStart.tm_isdst = -1;
tStart = mktime(&whenStart);
time_t now;
time(&now);
localtime(&now);
double difference = difftime(now, tStart);
difference /= 60;
if (difference > PreValueAgeStartup)
if (zwtime.length() == 0)
return false;
Value = PreValue;
ReturnValue = to_string(Value);
ReturnValueNoError = ReturnValue;
bool isAnalog = false;
bool isDigit = false;
int AnzahlAnalog = 0;
for (int i = 0; i < ListFlowControll->size(); ++i)
zerlegt = HelperZerlegeZeile(zwtime, "\t");
if (zerlegt.size() > 1) // new format
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
isAnalog = true;
if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
isDigit = true;
}
while ((zerlegt.size() > 1) && !_done)
{
name = trim(zerlegt[0]);
zwtime = trim(zerlegt[1]);
zwvalue = trim(zerlegt[2]);
if (isDigit || isAnalog)
for (int j = 0; j < NUMBERS.size(); ++j)
{
if (NUMBERS[j]->name == name)
{
NUMBERS[j]->PreValue = stof(zwvalue.c_str());
NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
time_t tStart;
int yy, month, dd, hh, mm, ss;
struct tm whenStart;
sscanf(zwtime.c_str(), PREVALUE_TIME_FORMAT_INPUT, &yy, &month, &dd, &hh, &mm, &ss);
whenStart.tm_year = yy - 1900;
whenStart.tm_mon = month - 1;
whenStart.tm_mday = dd;
whenStart.tm_hour = hh;
whenStart.tm_min = mm;
whenStart.tm_sec = ss;
whenStart.tm_isdst = -1;
NUMBERS[j]->lastvalue = mktime(&whenStart);
time(&tStart);
localtime(&tStart);
double difference = difftime(tStart, NUMBERS[j]->lastvalue);
difference /= 60;
if (difference > PreValueAgeStartup)
{
NUMBERS[j]->PreValueOkay = false;
}
else
{
NUMBERS[j]->PreValueOkay = true;
NUMBERS[j]->Value = NUMBERS[j]->PreValue;
NUMBERS[j]->ReturnValue = to_string(NUMBERS[j]->Value);
NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;
if (NUMBERS[j]->digit_roi || NUMBERS[j]->analog_roi)
{
NUMBERS[j]->ReturnValue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift);
NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;
}
}
}
}
if (!fgets(zw, 1024, pFile))
_done = true;
else
{
printf("Read Zeile Prevalue.ini: %s", zw);
zerlegt = HelperZerlegeZeile(trim(std::string(zw)), "\t");
if (zerlegt.size() > 1)
{
name = trim(zerlegt[0]);
zwtime = trim(zerlegt[1]);
zwvalue = trim(zerlegt[2]);
}
}
}
fclose(pFile);
}
else // old format
{
ReturnValue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
ReturnValueNoError = ReturnValue;
}
fgets(zw, 1024, pFile);
fclose(pFile);
printf("%s", zw);
zwvalue = trim(std::string(zw));
NUMBERS[0]->PreValue = stof(zwvalue.c_str());
time_t tStart;
int yy, month, dd, hh, mm, ss;
struct tm whenStart;
sscanf(zwtime.c_str(), PREVALUE_TIME_FORMAT_INPUT, &yy, &month, &dd, &hh, &mm, &ss);
whenStart.tm_year = yy - 1900;
whenStart.tm_mon = month - 1;
whenStart.tm_mday = dd;
whenStart.tm_hour = hh;
whenStart.tm_min = mm;
whenStart.tm_sec = ss;
whenStart.tm_isdst = -1;
printf("TIME: %d, %d, %d, %d, %d, %d\n", whenStart.tm_year, whenStart.tm_mon, whenStart.tm_wday, whenStart.tm_hour, whenStart.tm_min, whenStart.tm_sec);
NUMBERS[0]->lastvalue = mktime(&whenStart);
time(&tStart);
localtime(&tStart);
double difference = difftime(tStart, NUMBERS[0]->lastvalue);
difference /= 60;
if (difference > PreValueAgeStartup)
return false;
NUMBERS[0]->Value = NUMBERS[0]->PreValue;
NUMBERS[0]->ReturnValue = to_string(NUMBERS[0]->Value);
NUMBERS[0]->ReturnValueNoError = NUMBERS[0]->ReturnValue;
if (NUMBERS[0]->digit_roi || NUMBERS[0]->analog_roi)
{
NUMBERS[0]->ReturnValue = RundeOutput(NUMBERS[0]->Value, NUMBERS[0]->AnzahlAnalog - NUMBERS[0]->DecimalShift);
NUMBERS[0]->ReturnValueNoError = NUMBERS[0]->ReturnValue;
}
UpdatePreValueINI = true; // conversion to the new format
SavePreValue();
}
return true;
}
void ClassFlowPostProcessing::SavePreValue(float value, string zwtime)
void ClassFlowPostProcessing::SavePreValue()
{
FILE* pFile;
string _zw;
if (!UpdatePreValueINI) // PreValues unchanged --> file does not need to be rewritten
return;
pFile = fopen(FilePreValue.c_str(), "w");
if (strlen(zwtime.c_str()) == 0)
for (int j = 0; j < NUMBERS.size(); ++j)
{
time_t rawtime;
struct tm* timeinfo;
char buffer[80];
struct tm* timeinfo = localtime(&NUMBERS[j]->lastvalue);
strftime(buffer, 80, PREVALUE_TIME_FORMAT_OUTPUT, timeinfo);
NUMBERS[j]->timeStamp = std::string(buffer);
// printf("SaverPreValue %d, Value: %f, Nachkomma %d\n", j, NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
time(&rawtime);
timeinfo = localtime(&rawtime);
_zw = NUMBERS[j]->name + "\t" + NUMBERS[j]->timeStamp + "\t" + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma) + "\n";
printf("Write PreValue Zeile: %s\n", _zw.c_str());
strftime(buffer, 80, "%Y-%m-%d_%H-%M-%S", timeinfo);
zwtime = std::string(buffer);
fputs(_zw.c_str(), pFile);
}
PreValue = value;
fputs(zwtime.c_str(), pFile);
fputs("\n", pFile);
fputs(to_string(value).c_str(), pFile);
fputs("\n", pFile);
UpdatePreValueINI = false;
fclose(pFile);
}
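// Resulting prevalue.ini format (new, tab separated, one line per number; values illustrative):
//   main\t2021-09-12T07:33:30\t123.456
// The old single-number format (a timestamp line followed by a value line) is still accepted by
// LoadPreValue() above and converted to this format on the next save.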
ClassFlowPostProcessing::ClassFlowPostProcessing(std::vector<ClassFlow*>* lfc)
ClassFlowPostProcessing::ClassFlowPostProcessing(std::vector<ClassFlow*>* lfc, ClassFlowCNNGeneral *_analog, ClassFlowCNNGeneral *_digit)
{
PreValueUse = false;
PreValueAgeStartup = 30;
AllowNegativeRates = false;
MaxRateValue = 0.1;
ErrorMessage = false;
ListFlowControll = NULL;
PreValueOkay = false;
useMaxRateValue = false;
checkDigitIncreaseConsistency = false;
DecimalShift = 0;
ErrorMessageText = "";
FilePreValue = FormatFileName("/sdcard/config/prevalue.ini");
ListFlowControll = lfc;
flowMakeImage = NULL;
UpdatePreValueINI = false;
IgnoreLeadingNaN = false;
flowAnalog = _analog;
flowDigit = _digit;
for (int i = 0; i < ListFlowControll->size(); ++i)
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowMakeImage") == 0)
{
flowMakeImage = (ClassFlowMakeImage*) (*ListFlowControll)[i];
}
}
}
void ClassFlowPostProcessing::handleDecimalSeparator(string _decsep, string _value)
{
string _digit, _decpos;
int _pospunkt = _decsep.find_first_of(".");
// printf("Name: %s, Pospunkt: %d\n", _decsep.c_str(), _pospunkt);
if (_pospunkt > -1)
_digit = _decsep.substr(0, _pospunkt);
else
_digit = "default";
for (int j = 0; j < NUMBERS.size(); ++j)
{
int _zwdc = 0;
try
{
_zwdc = stoi(_value);
}
catch(const std::exception& e)
{
printf("ERROR - Decimalshift is not a number: %s\n", _value.c_str());
}
if (_digit == "default") // erstmal auf default setzen (falls sonst nichts gesetzt)
{
NUMBERS[j]->DecimalShift = _zwdc;
NUMBERS[j]->DecimalShiftInitial = _zwdc;
}
if (NUMBERS[j]->name == _digit)
{
NUMBERS[j]->DecimalShift = _zwdc;
NUMBERS[j]->DecimalShiftInitial = _zwdc;
}
NUMBERS[j]->Nachkomma = NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift;
}
}
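// Parameter forms handled here (illustrative): "DecimalShift = 1" applies to every NUMBERS entry
// (prefix "default"), while "main.DecimalShift = -2" only overrides the number named "main".
// The displayed decimal places then follow as Nachkomma = AnzahlAnalog - DecimalShift.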
void ClassFlowPostProcessing::handleMaxRateValue(string _decsep, string _value)
{
string _digit, _decpos;
int _pospunkt = _decsep.find_first_of(".");
// printf("Name: %s, Pospunkt: %d\n", _decsep.c_str(), _pospunkt);
if (_pospunkt > -1)
_digit = _decsep.substr(0, _pospunkt);
else
_digit = "default";
for (int j = 0; j < NUMBERS.size(); ++j)
{
float _zwdc = 1;
try
{
_zwdc = stof(_value);
}
catch(const std::exception& e)
{
printf("ERROR - MaxRateValue is not a number: %s\n", _value.c_str());
}
if (_digit == "default") // erstmal auf default setzen (falls sonst nichts gesetzt)
{
NUMBERS[j]->useMaxRateValue = true;
NUMBERS[j]->MaxRateValue = _zwdc;
}
if (NUMBERS[j]->name == _digit)
{
NUMBERS[j]->useMaxRateValue = true;
NUMBERS[j]->MaxRateValue = _zwdc;
}
}
}
bool ClassFlowPostProcessing::ReadParameter(FILE* pfile, string& aktparamgraph)
{
std::vector<string> zerlegt;
int _n;
aktparamgraph = trim(aktparamgraph);
@@ -169,53 +344,144 @@ bool ClassFlowPostProcessing::ReadParameter(FILE* pfile, string& aktparamgraph)
if (aktparamgraph.compare("[PostProcessing]") != 0) // Paragraph passt nich zu MakeImage
return false;
InitNUMBERS();
while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
{
zerlegt = this->ZerlegeZeile(aktparamgraph);
if ((toUpper(zerlegt[0]) == "DECIMALSHIFT") && (zerlegt.size() > 1))
std::string _param = GetParameterName(zerlegt[0]);
if ((toUpper(_param) == "DECIMALSHIFT") && (zerlegt.size() > 1))
{
DecimalShift = stoi(zerlegt[1]);
handleDecimalSeparator(zerlegt[0], zerlegt[1]);
}
if ((toUpper(_param) == "MAXRATEVALUE") && (zerlegt.size() > 1))
{
handleMaxRateValue(zerlegt[0], zerlegt[1]);
}
if ((toUpper(zerlegt[0]) == "PREVALUEUSE") && (zerlegt.size() > 1))
if ((toUpper(_param) == "PREVALUEUSE") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
{
PreValueUse = true;
}
}
if ((toUpper(zerlegt[0]) == "CHECKDIGITINCREASECONSISTENCY") && (zerlegt.size() > 1))
if ((toUpper(_param) == "CHECKDIGITINCREASECONSISTENCY") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
checkDigitIncreaseConsistency = true;
for (_n = 0; _n < NUMBERS.size(); ++_n)
NUMBERS[_n]->checkDigitIncreaseConsistency = true;
}
if ((toUpper(zerlegt[0]) == "ALLOWNEGATIVERATES") && (zerlegt.size() > 1))
if ((toUpper(_param) == "ALLOWNEGATIVERATES") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
AllowNegativeRates = true;
for (_n = 0; _n < NUMBERS.size(); ++_n)
NUMBERS[_n]->AllowNegativeRates = true;
}
if ((toUpper(zerlegt[0]) == "ERRORMESSAGE") && (zerlegt.size() > 1))
if ((toUpper(_param) == "ERRORMESSAGE") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
ErrorMessage = true;
}
if ((toUpper(zerlegt[0]) == "PREVALUEAGESTARTUP") && (zerlegt.size() > 1))
if ((toUpper(_param) == "IGNORELEADINGNAN") && (zerlegt.size() > 1))
{
if (toUpper(zerlegt[1]) == "TRUE")
IgnoreLeadingNaN = true;
}
if ((toUpper(_param) == "PREVALUEAGESTARTUP") && (zerlegt.size() > 1))
{
PreValueAgeStartup = std::stoi(zerlegt[1]);
}
if ((toUpper(zerlegt[0]) == "MAXRATEVALUE") && (zerlegt.size() > 1))
{
useMaxRateValue = true;
MaxRateValue = std::stof(zerlegt[1]);
}
}
if (PreValueUse) {
PreValueOkay = LoadPreValue();
LoadPreValue();
}
return true;
}
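A hedged example of a [PostProcessing] paragraph this parser accepts, assuming GetParameterName() strips the number prefix as the calls above suggest; the number name "main" and all values are illustrative:
[PostProcessing]
PreValueUse = true
PreValueAgeStartup = 30
AllowNegativeRates = false
ErrorMessage = true
IgnoreLeadingNaN = false
CheckDigitIncreaseConsistency = true
main.DecimalShift = 1
main.MaxRateValue = 0.1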
void ClassFlowPostProcessing::InitNUMBERS()
{
int anzDIGIT = 0;
int anzANALOG = 0;
std::vector<std::string> name_numbers;
if (flowDigit)
{
anzDIGIT = flowDigit->getAnzahlGENERAL();
flowDigit->UpdateNameNumbers(&name_numbers);
}
if (flowAnalog)
{
anzANALOG = flowAnalog->getAnzahlGENERAL();
flowAnalog->UpdateNameNumbers(&name_numbers);
}
printf("Anzahl NUMBERS: %d - DIGITS: %d, ANALOG: %d\n", name_numbers.size(), anzDIGIT, anzANALOG);
for (int _num = 0; _num < name_numbers.size(); ++_num)
{
NumberPost *_number = new NumberPost;
_number->name = name_numbers[_num];
_number->digit_roi = NULL;
if (flowDigit)
_number->digit_roi = flowDigit->FindGENERAL(name_numbers[_num]);
if (_number->digit_roi)
_number->AnzahlDigital = _number->digit_roi->ROI.size();
else
_number->AnzahlDigital = 0;
_number->analog_roi = NULL;
if (flowAnalog)
_number->analog_roi = flowAnalog->FindGENERAL(name_numbers[_num]);
if (_number->analog_roi)
_number->AnzahlAnalog = _number->analog_roi->ROI.size();
else
_number->AnzahlAnalog = 0;
_number->ReturnRawValue = ""; // Rohwert (mit N & führenden 0)
_number->ReturnValue = ""; // korrigierter Rückgabewert, ggf. mit Fehlermeldung
_number->ReturnValueNoError = ""; // korrigierter Rückgabewert ohne Fehlermeldung
_number->ErrorMessageText = ""; // Fehlermeldung bei Consistency Check
_number->ReturnPreValue = "";
_number->PreValueOkay = false;
_number->AllowNegativeRates = false;
_number->MaxRateValue = 0.1;
_number->useMaxRateValue = false;
_number->checkDigitIncreaseConsistency = false;
_number->PreValueOkay = false;
_number->useMaxRateValue = false;
_number->DecimalShift = 0;
_number->DecimalShiftInitial = 0;
_number->FlowRateAct = 0; // m3 / min
_number->PreValue = 0; // last value that was read out successfully
_number->Value = 0; // last value read out, incl. corrections
_number->ReturnRawValue = ""; // Rohwert (mit N & führenden 0)
_number->ReturnValue = ""; // korrigierter Rückgabewert, ggf. mit Fehlermeldung
_number->ReturnValueNoError = ""; // korrigierter Rückgabewert ohne Fehlermeldung
_number->ErrorMessageText = ""; // Fehlermeldung bei Consistency Check
_number->Nachkomma = _number->AnzahlAnalog;
NUMBERS.push_back(_number);
}
for (int i = 0; i < NUMBERS.size(); ++i)
printf("Number %s, Anz DIG: %d, Anz ANA %d\n", NUMBERS[i]->name.c_str(), NUMBERS[i]->AnzahlDigital, NUMBERS[i]->AnzahlAnalog);
}
string ClassFlowPostProcessing::ShiftDecimal(string in, int _decShift){
if (_decShift == 0){
@@ -264,142 +530,151 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
string digit = "";
string analog = "";
string zwvalue;
bool isdigit = false;
bool isanalog = false;
int AnzahlAnalog = 0;
string zw;
time_t imagetime = 0;
string rohwert;
ErrorMessageText = "";
// ErrorMessageText = "";
// Update Nachkomma, because switching the CNNType from Auto --> xyz can also change the number of decimal places:
for (int i = 0; i < ListFlowControll->size(); ++i)
{
if (((*ListFlowControll)[i])->name().compare("ClassFlowMakeImage") == 0)
{
imagetime = ((ClassFlowMakeImage*)(*ListFlowControll)[i])->getTimeImageTaken();
}
if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
{
isdigit = true;
digit = (*ListFlowControll)[i]->getReadout();
}
if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
{
isanalog = true;
analog = (*ListFlowControll)[i]->getReadout();
AnzahlAnalog = ((ClassFlowAnalog*)(*ListFlowControll)[i])->AnzahlROIs();
}
}
imagetime = flowMakeImage->getTimeImageTaken();
if (imagetime == 0)
time(&imagetime);
struct tm* timeinfo;
timeinfo = localtime(&imagetime);
char strftime_buf[64];
strftime(strftime_buf, sizeof(strftime_buf), "%Y-%m-%d_%H-%M-%S", timeinfo);
strftime(strftime_buf, sizeof(strftime_buf), "%Y-%m-%dT%H:%M:%S", timeinfo);
zwtime = std::string(strftime_buf);
printf("Anzahl NUMBERS: %d\n", NUMBERS.size());
// // TESTING ONLY////////////////////
// isdigit = true; digit = "12N";
// isanalog = true; analog = "456";
ReturnRawValue = "";
if (isdigit)
ReturnRawValue = digit;
if (isdigit && isanalog)
ReturnRawValue = ReturnRawValue + ".";
if (isanalog)
ReturnRawValue = ReturnRawValue + analog;
if (!isdigit)
for (int j = 0; j < NUMBERS.size(); ++j)
{
AnzahlAnalog = 0;
}
NUMBERS[j]->ReturnRawValue = "";
NUMBERS[j]->ErrorMessageText = "";
ReturnRawValue = ShiftDecimal(ReturnRawValue, DecimalShift);
rohwert = ReturnRawValue;
if (!PreValueUse || !PreValueOkay)
{
ReturnValue = ReturnRawValue;
ReturnValueNoError = ReturnRawValue;
if ((findDelimiterPos(ReturnValue, "N") == std::string::npos) && (ReturnValue.length() > 0))
{
while ((ReturnValue.length() > 1) && (ReturnValue[0] == '0'))
{
ReturnValue.erase(0, 1);
}
Value = std::stof(ReturnValue);
ReturnValueNoError = ReturnValue;
PreValueOkay = true;
PreValue = Value;
SavePreValue(Value, zwtime);
}
return true;
}
zw = ErsetzteN(ReturnRawValue);
Value = std::stof(zw);
if (checkDigitIncreaseConsistency)
{
Value = checkDigitConsistency(Value, DecimalShift, isanalog);
}
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
if ((!AllowNegativeRates) && (Value < PreValue))
{
ErrorMessageText = ErrorMessageText + "Negative Rate - Returned old value - read value: " + zwvalue + " - raw value: " + ReturnRawValue + " - checked value: " + std::to_string(Value) + " ";
Value = PreValue;
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
}
if (useMaxRateValue && (abs(Value - PreValue) > MaxRateValue))
{
ErrorMessageText = ErrorMessageText + "Rate too high - Returned old value - read value: " + zwvalue + " - checked value: " + std::to_string(Value) + " ";
Value = PreValue;
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
}
ReturnValueNoError = zwvalue;
ReturnValue = zwvalue;
if (ErrorMessage && (ErrorMessageText.length() > 0))
ReturnValue = ReturnValue + "\t" + ErrorMessageText;
if (ErrorMessageText.length() == 0)
{
PreValue = Value;
if (flowAnalog) NUMBERS[j]->AnzahlAnalog = flowAnalog->AnzahlROIs(j);
if (flowDigit) NUMBERS[j]->AnzahlDigital = flowDigit->AnzahlROIs(j);
SavePreValue(Value, zwtime);
if (flowDigit->isExtendedResolution())
NUMBERS[j]->DecimalShift = NUMBERS[j]->DecimalShiftInitial - 1;
NUMBERS[j]->Nachkomma = NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift;
if (NUMBERS[j]->digit_roi)
NUMBERS[j]->ReturnRawValue = flowDigit->getReadout(j);
if (NUMBERS[j]->digit_roi && NUMBERS[j]->analog_roi)
NUMBERS[j]->ReturnRawValue = NUMBERS[j]->ReturnRawValue + ".";
if (NUMBERS[j]->analog_roi)
NUMBERS[j]->ReturnRawValue = NUMBERS[j]->ReturnRawValue + flowAnalog->getReadout(j);
NUMBERS[j]->ReturnRawValue = ShiftDecimal(NUMBERS[j]->ReturnRawValue, NUMBERS[j]->DecimalShift);
///////////////// SPECIAL CASE for user Gustl ///////////////////////////////////////////////////////
if (IgnoreLeadingNaN)
{
while ((NUMBERS[j]->ReturnRawValue.length() > 1) && (NUMBERS[j]->ReturnRawValue[0] == 'N'))
{
NUMBERS[j]->ReturnRawValue.erase(0, 1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
rohwert = NUMBERS[j]->ReturnRawValue;
if (!PreValueUse || !NUMBERS[j]->PreValueOkay)
{
NUMBERS[j]->ReturnValue = NUMBERS[j]->ReturnRawValue;
NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnRawValue;
if ((findDelimiterPos(NUMBERS[j]->ReturnValue, "N") == std::string::npos) && (NUMBERS[j]->ReturnValue.length() > 0))
{
while ((NUMBERS[j]->ReturnValue.length() > 1) && (NUMBERS[j]->ReturnValue[0] == '0'))
{
NUMBERS[j]->ReturnValue.erase(0, 1);
}
NUMBERS[j]->Value = std::stof(NUMBERS[j]->ReturnValue);
NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;
NUMBERS[j]->PreValueOkay = true;
NUMBERS[j]->PreValue = NUMBERS[j]->Value;
NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
NUMBERS[j]->lastvalue = flowMakeImage->getTimeImageTaken();
zwtime = ConvertTimeToString(NUMBERS[j]->lastvalue, PREVALUE_TIME_FORMAT_OUTPUT);
UpdatePreValueINI = true;
SavePreValue();
}
}
else
{
zw = ErsetzteN(NUMBERS[j]->ReturnRawValue, NUMBERS[j]->PreValue);
NUMBERS[j]->Value = std::stof(zw);
if (NUMBERS[j]->checkDigitIncreaseConsistency)
{
NUMBERS[j]->Value = checkDigitConsistency(NUMBERS[j]->Value, NUMBERS[j]->DecimalShift, NUMBERS[j]->analog_roi != NULL, NUMBERS[j]->PreValue);
}
zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift);
if ((!NUMBERS[j]->AllowNegativeRates) && (NUMBERS[j]->Value < NUMBERS[j]->PreValue))
{
NUMBERS[j]->ErrorMessageText = NUMBERS[j]->ErrorMessageText + "Neg. Rate - Read: " + zwvalue + " - Raw: " + NUMBERS[j]->ReturnRawValue + " - Pre: " + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma) + " ";
NUMBERS[j]->Value = NUMBERS[j]->PreValue;
zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift);
}
if (NUMBERS[j]->useMaxRateValue && (abs(NUMBERS[j]->Value - NUMBERS[j]->PreValue) > NUMBERS[j]->MaxRateValue))
{
NUMBERS[j]->ErrorMessageText = NUMBERS[j]->ErrorMessageText + "Rate too high - Read: " + RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma) + " - Pre: " + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
NUMBERS[j]->Value = NUMBERS[j]->PreValue;
zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma);
}
NUMBERS[j]->ReturnValueNoError = zwvalue;
NUMBERS[j]->ReturnValue = zwvalue;
if (NUMBERS[j]->ErrorMessage && (NUMBERS[j]->ErrorMessageText.length() > 0))
NUMBERS[j]->ReturnValue = NUMBERS[j]->ReturnValue + "\t" + NUMBERS[j]->ErrorMessageText;
double difference = difftime(imagetime, NUMBERS[j]->lastvalue); // in seconds
difference /= 60; // in minutes
NUMBERS[j]->FlowRateAct = (NUMBERS[j]->Value - NUMBERS[j]->PreValue) / difference;
NUMBERS[j]->lastvalue = imagetime;
if (NUMBERS[j]->ErrorMessageText.length() == 0)
{
NUMBERS[j]->PreValue = NUMBERS[j]->Value;
NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
NUMBERS[j]->ErrorMessageText = "no error";
UpdatePreValueINI = true;
}
}
}
SavePreValue();
return true;
}
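The flow-rate bookkeeping above is plain difference arithmetic: difftime() yields seconds, dividing by 60 converts to minutes, and FlowRateAct is the change per minute. A hedged worked example with invented readings: if the previous value was 123.40 ten minutes ago and the current value is 123.46, FlowRateAct = (123.46 - 123.40) / 10 = 0.006 per minute.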
string ClassFlowPostProcessing::getReadout()
string ClassFlowPostProcessing::getReadout(int _number)
{
return ReturnValue;
return NUMBERS[_number]->ReturnValue;
}
string ClassFlowPostProcessing::getReadoutParam(bool _rawValue, bool _noerror)
string ClassFlowPostProcessing::getReadoutParam(bool _rawValue, bool _noerror, int _number)
{
if (_rawValue)
return ReturnRawValue;
return NUMBERS[_number]->ReturnRawValue;
if (_noerror)
return ReturnValueNoError;
return ReturnValue;
return NUMBERS[_number]->ReturnValueNoError;
return NUMBERS[_number]->ReturnValue;
}
string ClassFlowPostProcessing::RundeOutput(float _in, int _anzNachkomma){
@@ -426,7 +701,7 @@ string ClassFlowPostProcessing::RundeOutput(float _in, int _anzNachkomma){
}
string ClassFlowPostProcessing::ErsetzteN(string input)
string ClassFlowPostProcessing::ErsetzteN(string input, float _prevalue)
{
int posN, posPunkt;
int pot, ziffer;
@@ -447,7 +722,7 @@ string ClassFlowPostProcessing::ErsetzteN(string input)
pot = posPunkt - posN;
}
zw = PreValue / pow(10, pot);
zw =_prevalue / pow(10, pot);
ziffer = ((int) zw) % 10;
input[posN] = ziffer + 48;
@@ -457,7 +732,7 @@ string ClassFlowPostProcessing::ErsetzteN(string input)
return input;
}
float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamshift, bool _isanalog){
float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamshift, bool _isanalog, float _preValue){
int aktdigit, olddigit;
int aktdigit_before, olddigit_before;
int pot, pot_max;
@@ -475,12 +750,12 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
{
zw = input / pow(10, pot-1);
aktdigit_before = ((int) zw) % 10;
zw = PreValue / pow(10, pot-1);
zw = _preValue / pow(10, pot-1);
olddigit_before = ((int) zw) % 10;
zw = input / pow(10, pot);
aktdigit = ((int) zw) % 10;
zw = PreValue / pow(10, pot);
zw = _preValue / pow(10, pot);
olddigit = ((int) zw) % 10;
no_nulldurchgang = (olddigit_before <= aktdigit_before);
@@ -506,8 +781,18 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
return input;
}
string ClassFlowPostProcessing::getReadoutError()
string ClassFlowPostProcessing::getReadoutRate(int _number)
{
return ErrorMessageText;
return std::to_string(NUMBERS[_number]->FlowRateAct);
}
string ClassFlowPostProcessing::getReadoutTimeStamp(int _number)
{
return NUMBERS[_number]->timeStamp;
}
string ClassFlowPostProcessing::getReadoutError(int _number)
{
return NUMBERS[_number]->ErrorMessageText;
}

View File

@@ -1,47 +1,92 @@
#pragma once
#include "ClassFlow.h"
#include "ClassFlowMakeImage.h"
#include "ClassFlowCNNGeneral.h"
#include "ClassFlowCNNGeneral.h"
#include <string>
struct NumberPost {
float MaxRateValue;
bool useMaxRateValue;
bool ErrorMessage;
bool PreValueOkay;
bool AllowNegativeRates;
bool checkDigitIncreaseConsistency;
time_t lastvalue;
string timeStamp;
float FlowRateAct; // m3 / min
float PreValue; // last value that was read out successfully
float Value; // last value read out, incl. corrections
string ReturnRawValue; // raw value (with N & leading zeros)
string ReturnValue; // corrected return value, possibly with error message
string ReturnPreValue; // corrected return value without error message
string ReturnValueNoError;
string ErrorMessageText; // error message from the consistency check
int AnzahlAnalog;
int AnzahlDigital;
int DecimalShift;
int DecimalShiftInitial;
int Nachkomma;
general *digit_roi;
general *analog_roi;
string name;
};
class ClassFlowPostProcessing :
public ClassFlow
{
protected:
bool PreValueUse;
std::vector<NumberPost*> NUMBERS;
bool UpdatePreValueINI;
int PreValueAgeStartup;
bool AllowNegativeRates;
float MaxRateValue;
bool useMaxRateValue;
bool ErrorMessage;
bool PreValueOkay;
bool checkDigitIncreaseConsistency;
int DecimalShift;
bool IgnoreLeadingNaN; // SPECIAL CASE for user Gustl
ClassFlowCNNGeneral* flowAnalog;
ClassFlowCNNGeneral* flowDigit;
string FilePreValue;
float PreValue; // last value that was read out successfully
float Value; // last value read out, incl. corrections
string ReturnRawValue; // raw value (with N & leading zeros)
string ReturnValue; // corrected return value, possibly with error message
string ReturnValueNoError; // corrected return value without error message
string ErrorMessageText; // error message from the consistency check
ClassFlowMakeImage *flowMakeImage;
bool LoadPreValue(void);
string ShiftDecimal(string in, int _decShift);
string ErsetzteN(string);
float checkDigitConsistency(float input, int _decilamshift, bool _isanalog);
string ErsetzteN(string, float _prevalue);
float checkDigitConsistency(float input, int _decilamshift, bool _isanalog, float _preValue);
string RundeOutput(float _in, int _anzNachkomma);
void InitNUMBERS();
void handleDecimalSeparator(string _decsep, string _value);
void handleMaxRateValue(string _decsep, string _value);
public:
ClassFlowPostProcessing(std::vector<ClassFlow*>* lfc);
bool PreValueUse;
ClassFlowPostProcessing(std::vector<ClassFlow*>* lfc, ClassFlowCNNGeneral *_analog, ClassFlowCNNGeneral *_digit);
bool ReadParameter(FILE* pfile, string& aktparamgraph);
bool doFlow(string time);
string getReadout();
string getReadoutParam(bool _rawValue, bool _noerror);
string getReadoutError();
void SavePreValue(float value, string time = "");
string GetPreValue();
string getReadout(int _number);
string getReadoutParam(bool _rawValue, bool _noerror, int _number = 0);
string getReadoutError(int _number = 0);
string getReadoutRate(int _number = 0);
string getReadoutTimeStamp(int _number = 0);
void SavePreValue();
string GetPreValue(std::string _number = "");
void SetPreValue(float zw, string _numbers, bool _extern = false);
std::vector<NumberPost*> GetNumbers(){return NUMBERS;};
string name(){return "ClassFlowPostProcessing";};
};
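A hedged usage sketch for the new per-number accessors; the index 0, the wrapper function and the way the instance is obtained are assumptions made only for this example:
void printFirstNumber(ClassFlowPostProcessing &postproc)
{
    std::string value = postproc.getReadout(0);                    // corrected value, possibly with appended error text
    std::string raw   = postproc.getReadoutParam(true, false, 0);  // raw readout incl. 'N' placeholders
    std::string rate  = postproc.getReadoutRate(0);                // FlowRateAct of NUMBER 0
    std::string when  = postproc.getReadoutTimeStamp(0);           // timestamp of the last good value
    printf("%s @ %s (rate %s, raw %s)\n", value.c_str(), when.c_str(), rate.c_str(), raw.c_str());
}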

View File

@@ -10,6 +10,7 @@
#include <string.h>
#include <esp_log.h>
#include "ClassLogFile.h"
//#include "ClassLogFile.h"
@@ -77,8 +78,9 @@ void memCopyGen(uint8_t* _source, uint8_t* _target, int _size)
FILE* OpenFileAndWait(const char* nm, char* _mode, int _waitsec)
FILE* OpenFileAndWait(const char* nm, const char* _mode, int _waitsec)
{
printf("open config file %s in mode %s\n", nm, _mode);
FILE *pfile = fopen(nm, _mode);
if (pfile == NULL)
@@ -313,6 +315,14 @@ string toUpper(string in)
return in;
}
string toLower(string in)
{
for (int i = 0; i < in.length(); ++i)
in[i] = tolower(in[i]);
return in;
}
// CPU Temp
extern "C" uint8_t temprature_sens_read();
float temperatureRead()
@@ -358,3 +368,30 @@ int removeFolder(const char* folderPath, const char* logTag) {
return deleted;
}
std::vector<string> HelperZerlegeZeile(std::string input, std::string _delimiter = "")
{
std::vector<string> Output;
std::string delimiter = " =,";
if (_delimiter.length() > 0){
delimiter = _delimiter;
}
input = trim(input, delimiter);
size_t pos = findDelimiterPos(input, delimiter);
std::string token;
while (pos != std::string::npos) {
token = input.substr(0, pos);
token = trim(token, delimiter);
Output.push_back(token);
input.erase(0, pos + 1);
input = trim(input, delimiter);
pos = findDelimiterPos(input, delimiter);
}
Output.push_back(input);
return Output;
}
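A hedged example of the new helper; the input line is invented and the explicit delimiter simply repeats the default:
std::vector<string> parts = HelperZerlegeZeile("main.DecimalShift = 1", " =,");
// parts[0] == "main.DecimalShift", parts[1] == "1"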

View File

@@ -1,6 +1,7 @@
#pragma once
#include <string>
#include <fstream>
#include <vector>
using namespace std;
@@ -10,7 +11,7 @@ void FindReplace(std::string& line, std::string& oldString, std::string& newStri
void CopyFile(string input, string output);
FILE* OpenFileAndWait(const char* nm, char* _mode, int _waitsec = 1);
FILE* OpenFileAndWait(const char* nm, const char* _mode, int _waitsec = 1);
size_t findDelimiterPos(string input, string delimiter);
//string trim(string istring);
@@ -22,6 +23,7 @@ string getFileType(string filename);
int mkdir_r(const char *dir, const mode_t mode);
int removeFolder(const char* folderPath, const char* logTag);
string toLower(string in);
string toUpper(string in);
float temperatureRead();
@@ -30,6 +32,8 @@ time_t addDays(time_t startTime, int days);
void memCopyGen(uint8_t* _source, uint8_t* _target, int _size);
std::vector<string> HelperZerlegeZeile(std::string input, std::string _delimiter);
///////////////////////////
size_t getInternalESPHeapSize();
size_t getESPHeapSize();

View File

@@ -354,12 +354,10 @@ CImageBasis::CImageBasis(CImageBasis *_copyfrom, int _anzrepeat)
int memsize = width * height * channels;
rgb_image = (unsigned char*)GET_MEMORY(memsize);
TickType_t xDelay;
int anz = 1;
while (!rgb_image && (anz < _anzrepeat))
{
printf("Create Image from Copy - Speicher ist voll - Versuche es erneut: %d.\n", anz);
xDelay = 1000 / portTICK_PERIOD_MS;
printf("Create Image from Copy - Speicher ist voll - Versuche es erneut: %d.\n", anz);
rgb_image = (unsigned char*) malloc(memsize);
anz++;
}

View File

@@ -1,7 +1,7 @@
#include "CRotateImage.h"
CRotateImage::CRotateImage(CImageBasis *_org, CImageBasis *_temp)
CRotateImage::CRotateImage(CImageBasis *_org, CImageBasis *_temp, bool _flip)
{
rgb_image = _org->rgb_image;
channels = _org->channels;
@@ -9,8 +9,10 @@ CRotateImage::CRotateImage(CImageBasis *_org, CImageBasis *_temp)
height = _org->height;
bpp = _org->bpp;
externalImage = true;
ImageTMP = _temp;
ImageTMP = _temp;
ImageOrg = _org;
islocked = false;
doflip = _flip;
}
void CRotateImage::Mirror(){
@@ -58,12 +60,33 @@ void CRotateImage::Mirror(){
void CRotateImage::Rotate(float _angle, int _centerx, int _centery)
{
int org_width, org_height;
float m[2][3];
float x_center = _centerx;
float y_center = _centery;
_angle = _angle / 180 * M_PI;
if (doflip)
{
org_width = width;
org_height = height;
height = org_width;
width = org_height;
x_center = x_center - (org_width/2) + (org_height/2);
y_center = y_center + (org_width/2) - (org_height/2);
if (ImageOrg)
{
ImageOrg->height = height;
ImageOrg->width = width;
}
}
else
{
org_width = width;
org_height = height;
}
m[0][0] = cos(_angle);
m[0][1] = sin(_angle);
m[0][2] = (1 - m[0][0]) * x_center - m[0][1] * y_center;
@@ -72,6 +95,12 @@ void CRotateImage::Rotate(float _angle, int _centerx, int _centery)
m[1][1] = m[0][0];
m[1][2] = m[0][1] * x_center + (1 - m[0][0]) * y_center;
if (doflip)
{
m[0][2] = m[0][2] + (org_width/2) - (org_height/2);
m[1][2] = m[1][2] - (org_width/2) + (org_height/2);
}
int memsize = width * height * channels;
uint8_t* odata;
if (ImageTMP)
@@ -101,9 +130,9 @@ void CRotateImage::Rotate(float _angle, int _centerx, int _centery)
x_source += int(m[0][2]);
y_source += int(m[1][2]);
if ((x_source >= 0) && (x_source < width) && (y_source >= 0) && (y_source < height))
if ((x_source >= 0) && (x_source < org_width) && (y_source >= 0) && (y_source < org_height))
{
p_source = rgb_image + (channels * (y_source * width + x_source));
p_source = rgb_image + (channels * (y_source * org_width + x_source));
for (int _channels = 0; _channels < channels; ++_channels)
p_target[_channels] = p_source[_channels];
}

View File

@@ -4,10 +4,11 @@
class CRotateImage: public CImageBasis
{
public:
CImageBasis *ImageTMP;
CRotateImage(std::string _image) : CImageBasis(_image) {ImageTMP = NULL;};
CRotateImage(uint8_t* _rgb_image, int _channels, int _width, int _height, int _bpp) : CImageBasis(_rgb_image, _channels, _width, _height, _bpp) {ImageTMP = NULL;};
CRotateImage(CImageBasis *_org, CImageBasis *_temp);
CImageBasis *ImageTMP, *ImageOrg;
bool doflip;
CRotateImage(std::string _image, bool _flip = false) : CImageBasis(_image) {ImageTMP = NULL; ImageOrg = NULL; doflip = _flip;};
CRotateImage(uint8_t* _rgb_image, int _channels, int _width, int _height, int _bpp, bool _flip = false) : CImageBasis(_rgb_image, _channels, _width, _height, _bpp) {ImageTMP = NULL; ImageOrg = NULL; doflip = _flip;};
CRotateImage(CImageBasis *_org, CImageBasis *_temp, bool _flip = false);
void Rotate(float _angle);
void Rotate(float _angle, int _centerx, int _centery);

View File

@@ -12,7 +12,7 @@ ClassLogFile LogFile("/sdcard/log/message", "log_%Y-%m-%d.txt");
void ClassLogFile::WriteHeapInfo(std::string _id)
{
std::string _zw = "\t" + _id;
std::string _zw = "\t" + _id;
if (loglevel > 0)
_zw = _zw + "\t" + getESPHeapInfo();
@@ -77,7 +77,7 @@ void ClassLogFile::WriteToDedicatedFile(std::string _fn, std::string info, bool
time(&rawtime);
timeinfo = localtime(&rawtime);
strftime(buffer, 80, "%Y-%m-%d_%H-%M-%S", timeinfo);
strftime(buffer, 80, "%Y-%m-%dT%H:%M:%S", timeinfo);
zwtime = std::string(buffer);
info = zwtime + ": " + info;

View File

@@ -1,12 +1,14 @@
#include "interface_mqtt.h"
//#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#include "esp_log.h"
#include "mqtt_client.h"
#include "ClassLogFile.h"
static const char *TAG = "interface_mqtt";
static const char *TAG_INTERFACEMQTT = "interface_mqtt";
std::map<std::string, std::function<void()>>* connectFunktionMap = NULL;
std::map<std::string, std::function<bool(std::string, char*, int)>>* subscribeFunktionMap = NULL;
bool debugdetail = true;
// #define CONFIG_BROKER_URL "mqtt://192.168.178.43:1883"
@@ -16,51 +18,74 @@ esp_mqtt_event_id_t esp_mmqtt_ID = MQTT_EVENT_ANY;
bool mqtt_connected = false;
esp_mqtt_client_handle_t client = NULL;
void MQTTPublish(std::string _key, std::string _content){
void MQTTPublish(std::string _key, std::string _content, int retained_flag){
if (client && mqtt_connected) {
int msg_id;
std::string zw;
msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, 0);
msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, retained_flag);
zw = "sent publish successful in MQTTPublish, msg_id=" + std::to_string(msg_id) + ", " + _key + ", " + _content;
if (debugdetail) LogFile.WriteToFile(zw);
ESP_LOGI(TAG, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
ESP_LOGD(TAG_INTERFACEMQTT, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
}
else {
ESP_LOGI(TAG, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
ESP_LOGW(TAG_INTERFACEMQTT, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
}
}
static esp_err_t mqtt_event_handler_cb(esp_mqtt_event_handle_t event)
{
int msg_id;
std::string topic = "";
switch (event->event_id) {
case MQTT_EVENT_BEFORE_CONNECT:
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_BEFORE_CONNECT");
break;
case MQTT_EVENT_CONNECTED:
ESP_LOGI(TAG, "MQTT_EVENT_CONNECTED");
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_CONNECTED");
mqtt_connected = true;
MQTTconnected();
break;
case MQTT_EVENT_DISCONNECTED:
ESP_LOGI(TAG, "MQTT_EVENT_DISCONNECTED");
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_DISCONNECTED");
break;
case MQTT_EVENT_SUBSCRIBED:
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_SUBSCRIBED, msg_id=%d", event->msg_id);
msg_id = esp_mqtt_client_publish(client, "/topic/qos0", "data", 0, 0, 0);
ESP_LOGI(TAG_INTERFACEMQTT, "sent publish successful, msg_id=%d", msg_id);
break;
case MQTT_EVENT_UNSUBSCRIBED:
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_UNSUBSCRIBED, msg_id=%d", event->msg_id);
break;
case MQTT_EVENT_PUBLISHED:
ESP_LOGI(TAG, "MQTT_EVENT_PUBLISHED, msg_id=%d", event->msg_id);
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_PUBLISHED, msg_id=%d", event->msg_id);
break;
case MQTT_EVENT_DATA:
ESP_LOGI(TAG, "MQTT_EVENT_DATA");
printf("TOPIC=%.*s\r\n", event->topic_len, event->topic);
printf("DATA=%.*s\r\n", event->data_len, event->data);
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_DATA");
ESP_LOGI(TAG_INTERFACEMQTT, "TOPIC=%.*s\r\n", event->topic_len, event->topic);
ESP_LOGI(TAG_INTERFACEMQTT, "DATA=%.*s\r\n", event->data_len, event->data);
topic.assign(event->topic, event->topic_len);
if (subscribeFunktionMap != NULL) {
if (subscribeFunktionMap->find(topic) != subscribeFunktionMap->end()) {
ESP_LOGD(TAG_INTERFACEMQTT, "call handler function\r\n");
(*subscribeFunktionMap)[topic](topic, event->data, event->data_len);
}
} else {
ESP_LOGW(TAG_INTERFACEMQTT, "no handler available\r\n");
}
break;
case MQTT_EVENT_ERROR:
ESP_LOGI(TAG, "MQTT_EVENT_ERROR");
ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_ERROR");
break;
default:
ESP_LOGI(TAG, "Other event id:%d", event->event_id);
ESP_LOGI(TAG_INTERFACEMQTT, "Other event id:%d", event->event_id);
break;
}
return ESP_OK;
}
static void mqtt_event_handler(void *handler_args, esp_event_base_t base, int32_t event_id, void *event_data) {
ESP_LOGD(TAG, "Event dispatched from event loop base=%s, event_id=%d", base, event_id);
ESP_LOGD(TAG_INTERFACEMQTT, "Event dispatched from event loop base=%s, event_id=%d", base, event_id);
mqtt_event_handler_cb((esp_mqtt_event_handle_t) event_data);
}
@@ -74,6 +99,7 @@ void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, st
.client_id = _clientid.c_str(),
.lwt_topic = _LWTContext.c_str(),
.lwt_msg = _zwmessage.c_str(),
.lwt_retain = 1,
.lwt_msg_len = _lzw,
.keepalive = _keepalive
};
@@ -81,12 +107,100 @@ void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, st
if (_user.length() && _password.length()){
mqtt_cfg.username = _user.c_str();
mqtt_cfg.password = _password.c_str();
printf("Connect to MQTT: %s, %s", mqtt_cfg.username, mqtt_cfg.password);
ESP_LOGI(TAG_INTERFACEMQTT, "Connect to MQTT: %s, %s", mqtt_cfg.username, mqtt_cfg.password);
};
client = esp_mqtt_client_init(&mqtt_cfg);
esp_mqtt_client_register_event(client, esp_mmqtt_ID, mqtt_event_handler, client);
esp_mqtt_client_start(client);
MQTTPublish(_LWTContext, "");
MQTTPublish(_LWTContext, "", 1);
}
void MQTTdestroy() {
if (client != NULL) {
esp_mqtt_client_stop(client);
esp_mqtt_client_destroy(client);
}
}
bool MQTTisConnected() {
return mqtt_connected;
}
void MQTTregisterConnectFunction(std::string name, std::function<void()> func){
ESP_LOGD(TAG_INTERFACEMQTT, "MQTTregisteronnectFunction %s\r\n", name.c_str());
if (connectFunktionMap == NULL) {
connectFunktionMap = new std::map<std::string, std::function<void()>>();
}
if ((*connectFunktionMap)[name] != NULL) {
ESP_LOGW(TAG_INTERFACEMQTT, "connect function %s already registred", name.c_str());
return;
}
(*connectFunktionMap)[name] = func;
if (mqtt_connected) {
func();
}
}
void MQTTunregisterConnectFunction(std::string name){
ESP_LOGD(TAG_INTERFACEMQTT, "MQTTregisteronnectFunction %s\r\n", name.c_str());
if ((connectFunktionMap != NULL) && (connectFunktionMap->find(name) != connectFunktionMap->end())) {
connectFunktionMap->erase(name);
}
}
void MQTTregisterSubscribeFunction(std::string topic, std::function<bool(std::string, char*, int)> func){
ESP_LOGD(TAG_INTERFACEMQTT, "MQTTregisterSubscribeFunction %s\r\n", topic.c_str());
if (subscribeFunktionMap == NULL) {
subscribeFunktionMap = new std::map<std::string, std::function<bool(std::string, char*, int)>>();
}
if ((*subscribeFunktionMap)[topic] != NULL) {
ESP_LOGW(TAG_INTERFACEMQTT, "topic %s already registred for subscription", topic.c_str());
return;
}
(*subscribeFunktionMap)[topic] = func;
if (mqtt_connected) {
int msg_id = esp_mqtt_client_subscribe(client, topic.c_str(), 0);
ESP_LOGD(TAG_INTERFACEMQTT, "topic %s subscribe successful, msg_id=%d", topic.c_str(), msg_id);
}
}
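A hedged usage sketch for the new subscription hook; the topic string and the wrapper function are assumptions, not part of this change:
void exampleRegisterControlTopic()
{
    MQTTregisterSubscribeFunction("watermeter/ctrl",
        [](std::string topic, char *data, int data_len) {
            ESP_LOGI(TAG_INTERFACEMQTT, "received %.*s on %s", data_len, data, topic.c_str());
            return true;
        });
}
// The handler runs from MQTT_EVENT_DATA; if the client is already connected the
// subscription is issued immediately, otherwise on the next MQTT_EVENT_CONNECTED.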
void MQTTconnected(){
if (mqtt_connected) {
if (connectFunktionMap != NULL) {
for(std::map<std::string, std::function<void()>>::iterator it = connectFunktionMap->begin(); it != connectFunktionMap->end(); ++it) {
it->second();
ESP_LOGD(TAG_INTERFACEMQTT, "call connect function %s", it->first.c_str());
}
}
if (subscribeFunktionMap != NULL) {
for(std::map<std::string, std::function<bool(std::string, char*, int)>>::iterator it = subscribeFunktionMap->begin(); it != subscribeFunktionMap->end(); ++it) {
int msg_id = esp_mqtt_client_subscribe(client, it->first.c_str(), 0);
ESP_LOGD(TAG_INTERFACEMQTT, "topic %s subscribe successful, msg_id=%d", it->first.c_str(), msg_id);
}
}
}
}
void MQTTdestroySubscribeFunction(){
if (subscribeFunktionMap != NULL) {
if (mqtt_connected) {
for(std::map<std::string, std::function<bool(std::string, char*, int)>>::iterator it = subscribeFunktionMap->begin(); it != subscribeFunktionMap->end(); ++it) {
int msg_id = esp_mqtt_client_unsubscribe(client, it->first.c_str());
ESP_LOGI(TAG_INTERFACEMQTT, "topic %s unsubscribe successful, msg_id=%d", it->first.c_str(), msg_id);
}
}
subscribeFunktionMap->clear();
delete subscribeFunktionMap;
subscribeFunktionMap = NULL;
}
}

View File

@@ -1,7 +1,23 @@
#ifndef INTERFACE_MQTT_H
#define INTERFACE_MQTT_H
#include <string>
#include <map>
#include <functional>
void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password, std::string _LWTContext, int _keepalive);
void MQTTdestroy();
//void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user = "", std::string _password = "");
void MQTTPublish(std::string _key, std::string _content);
void MQTTPublish(std::string _key, std::string _content, int retained_flag = 0);
bool MQTTisConnected();
void MQTTregisterConnectFunction(std::string name, std::function<void()> func);
void MQTTunregisterConnectFunction(std::string name);
void MQTTregisterSubscribeFunction(std::string topic, std::function<bool(std::string, char*, int)> func);
void MQTTdestroySubscribeFunction();
void MQTTconnected();
#endif //INTERFACE_MQTT_H

View File

@@ -17,6 +17,7 @@ float CTfLiteClass::GetOutputValue(int nr)
return output2->data.f[nr];
}
int CTfLiteClass::GetClassFromImageBasis(CImageBasis *rs)
{
if (!LoadInputImageBasis(rs))
@@ -27,6 +28,48 @@ int CTfLiteClass::GetClassFromImageBasis(CImageBasis *rs)
return GetOutClassification();
}
int CTfLiteClass::GetOutClassification(int _von, int _bis)
{
TfLiteTensor* output2 = interpreter->output(0);
float zw_max;
float zw;
int zw_class;
if (output2 == NULL)
return -1;
int numeroutput = output2->dims->data[1];
//printf("\n number output neurons: %d\n\n", numeroutput);
if (_bis == -1)
_bis = numeroutput;
if (_von == -1)
_von = 0;
if (_bis > numeroutput)
{
printf("ANZAHL OUTPUT NEURONS passt nicht zu geforderter Classifizierung!");
return -1;
}
zw_max = output2->data.f[_von];
zw_class = _von;
for (int i = _von+1; i <= _bis; ++i)
{
zw = output2->data.f[i];
if (zw > zw_max)
{
zw_max = zw;
zw_class = i;
}
}
return (zw_class - _von);
}
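A hedged call example: with a ten-class digit model the argmax can be restricted to output neurons 0..9, and the returned index is relative to the lower bound _von. The wrapper function is an assumption for the example:
int readDigit(CTfLiteClass &tflite)
{
    return tflite.GetOutClassification(0, 9); // argmax over neurons 0..9, returned relative to 0
}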
/*
int CTfLiteClass::GetOutClassification()
{
TfLiteTensor* output2 = interpreter->output(0);
@@ -50,6 +93,7 @@ int CTfLiteClass::GetOutClassification()
}
return zw_class;
}
*/
void CTfLiteClass::GetInputDimension(bool silent = false)
{
@@ -70,18 +114,18 @@ void CTfLiteClass::GetInputDimension(bool silent = false)
}
void CTfLiteClass::GetOutPut()
int CTfLiteClass::GetAnzOutPut(bool silent)
{
TfLiteTensor* output2 = this->interpreter->output(0);
int numdim = output2->dims->size;
printf("NumDimension: %d\n", numdim);
if (!silent) printf("NumDimension: %d\n", numdim);
int sizeofdim;
for (int j = 0; j < numdim; ++j)
{
sizeofdim = output2->dims->data[j];
printf("SizeOfDimension %d: %d\n", j, sizeofdim);
if (!silent) printf("SizeOfDimension %d: %d\n", j, sizeofdim);
}
@@ -92,20 +136,22 @@ void CTfLiteClass::GetOutPut()
for (int i = 0; i < numeroutput; ++i)
{
fo = output2->data.f[i];
printf("Result %d: %f\n", i, fo);
if (!silent) printf("Result %d: %f\n", i, fo);
}
return numeroutput;
}
void CTfLiteClass::Invoke()
{
interpreter->Invoke();
if (interpreter != nullptr)
interpreter->Invoke();
}
bool CTfLiteClass::LoadInputImageBasis(CImageBasis *rs)
{
std::string zw = "ClassFlowAnalog::doNeuralNetwork nach LoadInputResizeImage: ";
std::string zw = "ClassFlowCNNGeneral::doNeuralNetwork nach LoadInputResizeImage: ";
unsigned int w = rs->width;
unsigned int h = rs->height;
@@ -148,6 +194,8 @@ void CTfLiteClass::MakeAllocate()
TfLiteStatus allocate_status = this->interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
LogFile.WriteToFile("AllocateTensors() failed");
this->GetInputDimension();
return;
}
@@ -155,9 +203,9 @@ void CTfLiteClass::MakeAllocate()
}
void CTfLiteClass::GetInputTensorSize(){
#ifdef DEBUG_DETAIL_ON
float *zw = this->input;
int test = sizeof(zw);
#ifdef DEBUG_DETAIL_ON
printf("Input Tensor Dimension: %d\n", test);
#endif
}
@@ -184,13 +232,11 @@ unsigned char* CTfLiteClass::ReadFileToCharArray(std::string _fn)
unsigned char *result = (unsigned char*) malloc(size);
int anz = 1;
TickType_t xDelay;
while (!result && (anz < 6)) // try at most 5 times (= 5 s)
{
#ifdef DEBUG_DETAIL_ON
printf("Speicher ist voll - Versuche es erneut: %d.\n", anz);
#endif
xDelay = 1000 / portTICK_PERIOD_MS;
result = (unsigned char*) malloc(size);
anz++;
}
@@ -208,7 +254,7 @@ unsigned char* CTfLiteClass::ReadFileToCharArray(std::string _fn)
return result;
}
void CTfLiteClass::LoadModel(std::string _fn){
bool CTfLiteClass::LoadModel(std::string _fn){
#ifdef SUPRESS_TFLITE_ERRORS
this->error_reporter = new tflite::OwnMicroErrorReporter;
@@ -219,9 +265,14 @@ void CTfLiteClass::LoadModel(std::string _fn){
unsigned char *rd;
rd = ReadFileToCharArray(_fn.c_str());
if (rd == NULL)
return false;
this->model = tflite::GetModel(rd);
free(rd);
TFLITE_MINIMAL_CHECK(model != nullptr);
return true;
}

View File

@@ -9,7 +9,7 @@
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
//#include "tensorflow/lite/version.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "esp_err.h"
#include "esp_log.h"
@@ -56,14 +56,18 @@ class CTfLiteClass
public:
CTfLiteClass();
~CTfLiteClass();
void LoadModel(std::string _fn);
bool LoadModel(std::string _fn);
void MakeAllocate();
void GetInputTensorSize();
bool LoadInputImageBasis(CImageBasis *rs);
void Invoke();
void GetOutPut();
int GetOutClassification();
int GetAnzOutPut(bool silent = true);
// void GetOutPut();
// int GetOutClassification();
int GetOutClassification(int _von = -1, int _bis = -1);
int GetClassFromImageBasis(CImageBasis *rs);
std::string GetStatusFlow();
float GetOutputValue(int nr);
void GetInputDimension(bool silent);

View File

@@ -8,6 +8,7 @@
#include <iomanip>
#include <sstream>
#include "defines.h"
#include "Helper.h"
#include "esp_camera.h"
@@ -17,8 +18,9 @@
#include "ClassFlowControll.h"
#include "ClassLogFile.h"
#include "server_GPIO.h"
//#define DEBUG_DETAIL_ON
// #define DEBUG_DETAIL_ON
ClassFlowControll tfliteflow;
@@ -37,6 +39,9 @@ bool auto_isrunning = false;
int countRounds = 0;
static const char *TAGTFLITE = "server_tflite";
int getCountFlowRounds() {
return countRounds;
}
@@ -64,9 +69,11 @@ void KillTFliteTasks()
#ifdef DEBUG_DETAIL_ON
printf("Handle: xHandleblink_task_doFlow: %ld\n", (long) xHandleblink_task_doFlow);
#endif
if (xHandleblink_task_doFlow)
if (xHandleblink_task_doFlow != NULL)
{
vTaskDelete(xHandleblink_task_doFlow);
TaskHandle_t xHandleblink_task_doFlowTmp = xHandleblink_task_doFlow;
xHandleblink_task_doFlow = NULL;
vTaskDelete(xHandleblink_task_doFlowTmp);
#ifdef DEBUG_DETAIL_ON
printf("Killed: xHandleblink_task_doFlow\n");
#endif
@@ -75,9 +82,11 @@ void KillTFliteTasks()
#ifdef DEBUG_DETAIL_ON
printf("Handle: xHandletask_autodoFlow: %ld\n", (long) xHandletask_autodoFlow);
#endif
if (xHandletask_autodoFlow)
if (xHandletask_autodoFlow != NULL)
{
vTaskDelete(xHandletask_autodoFlow);
TaskHandle_t xHandletask_autodoFlowTmp = xHandletask_autodoFlow;
xHandletask_autodoFlow = NULL;
vTaskDelete(xHandletask_autodoFlowTmp);
#ifdef DEBUG_DETAIL_ON
printf("Killed: xHandletask_autodoFlow\n");
#endif
@@ -87,11 +96,10 @@ void KillTFliteTasks()
void doInit(void)
{
string config = "/sdcard/config/config.ini";
#ifdef DEBUG_DETAIL_ON
printf("Start tfliteflow.InitFlow(config);\n");
#endif
tfliteflow.InitFlow(config);
tfliteflow.InitFlow(CONFIG_FILE);
#ifdef DEBUG_DETAIL_ON
printf("Finished tfliteflow.InitFlow(config);\n");
#endif
@@ -136,7 +144,7 @@ esp_err_t handler_init(httpd_req_t *req)
printf("handler_doinit uri:\n"); printf(req->uri); printf("\n");
#endif
char* resp_str = "Init started<br>";
const char* resp_str = "Init started<br>";
httpd_resp_send(req, resp_str, strlen(resp_str));
doInit();
@@ -159,8 +167,6 @@ esp_err_t handler_doflow(httpd_req_t *req)
LogFile.WriteHeapInfo("handler_doflow - Start");
#endif
char* resp_str;
printf("handler_doFlow uri: "); printf(req->uri); printf("\n");
if (flowisrunning)
@@ -173,7 +179,7 @@ esp_err_t handler_doflow(httpd_req_t *req)
{
xTaskCreate(&blink_task_doFlow, "blink_doFlow", configMINIMAL_STACK_SIZE * 64, NULL, tskIDLE_PRIORITY+1, &xHandleblink_task_doFlow);
}
resp_str = "doFlow gestartet - dauert ca. 60 Sekunden";
const char* resp_str = "doFlow gestartet - dauert ca. 60 Sekunden";
httpd_resp_send(req, resp_str, strlen(resp_str));
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
@@ -196,6 +202,8 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)
bool _rawValue = false;
bool _noerror = false;
bool _all = false;
std::string _type = "value";
string zw;
printf("handler_wasserzaehler uri:\n"); printf(req->uri); printf("\n");
@@ -206,6 +214,22 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)
if (httpd_req_get_url_query_str(req, _query, 100) == ESP_OK)
{
// printf("Query: "); printf(_query); printf("\n");
if (httpd_query_key_value(_query, "all", _size, 10) == ESP_OK)
{
#ifdef DEBUG_DETAIL_ON
printf("all is found"); printf(_size); printf("\n");
#endif
_all = true;
}
if (httpd_query_key_value(_query, "type", _size, 10) == ESP_OK)
{
#ifdef DEBUG_DETAIL_ON
printf("all is found"); printf(_size); printf("\n");
#endif
_type = std::string(_size);
}
if (httpd_query_key_value(_query, "rawvalue", _size, 10) == ESP_OK)
{
#ifdef DEBUG_DETAIL_ON
@@ -222,6 +246,29 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)
}
}
httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
if (_all)
{
httpd_resp_set_type(req, "text/plain");
printf("TYPE: %s\n", _type.c_str());
int _intype = READOUT_TYPE_VALUE;
if (_type == "prevalue")
_intype = READOUT_TYPE_PREVALUE;
if (_type == "raw")
_intype = READOUT_TYPE_RAWVALUE;
if (_type == "error")
_intype = READOUT_TYPE_ERROR;
zw = tfliteflow.getReadoutAll(_intype);
printf("ZW: %s\n", zw.c_str());
if (zw.length() > 0)
httpd_resp_sendstr_chunk(req, zw.c_str());
httpd_resp_sendstr_chunk(req, NULL);
return ESP_OK;
}
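// Illustration only (the route and query strings below are assumptions, not defined here):
//   .../value?all&type=value    -> one line per NUMBER with the corrected value
//   .../value?all&type=raw      -> raw readouts
//   .../value?all&type=prevalue -> stored previous values
//   .../value?all&type=error    -> error texts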
zw = tfliteflow.getReadout(_rawValue, _noerror);
if (zw.length() > 0)
httpd_resp_sendstr_chunk(req, zw.c_str());
@@ -237,16 +284,27 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)
httpd_resp_sendstr_chunk(req, txt.c_str());
std::vector<HTMLInfo*> htmlinfo;
htmlinfo = tfliteflow.GetAllDigital();
htmlinfo = tfliteflow.GetAllDigital();
printf("Size of htmlinfo: %i\n", htmlinfo.size());
for (int i = 0; i < htmlinfo.size(); ++i)
{
if (htmlinfo[i]->val == 10)
zw = "NaN";
if (tfliteflow.GetTypeDigital() == Digital)
{
if (htmlinfo[i]->val == 10)
zw = "NaN";
else
zw = to_string((int) htmlinfo[i]->val);
txt = "<img src=\"/img_tmp/" + htmlinfo[i]->filename + "\"> " + zw;
}
else
{
zw = to_string((int) htmlinfo[i]->val);
std::stringstream stream;
stream << std::fixed << std::setprecision(1) << htmlinfo[i]->val;
zw = stream.str();
txt = "<img src=\"/img_tmp/" + htmlinfo[i]->filename + "\"> " + zw;
}
txt = "<img src=\"/img_tmp/" + htmlinfo[i]->filename + "\"> " + zw;
httpd_resp_sendstr_chunk(req, txt.c_str());
delete htmlinfo[i];
}
@@ -429,7 +487,7 @@ esp_err_t handler_editflow(httpd_req_t *req)
// printf("Parameter host: "); printf(_host.c_str()); printf("\n");
// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str());
bool changed = Camera.SetBrightnessContrastSaturation(bri, con, sat);
Camera.SetBrightnessContrastSaturation(bri, con, sat);
std::string zw = tfliteflow.doSingleStep("[MakeImage]", _host);
httpd_resp_sendstr_chunk(req, zw.c_str());
}
@@ -446,7 +504,9 @@ esp_err_t handler_editflow(httpd_req_t *req)
// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str());
std::string zw = tfliteflow.doSingleStep("[Alignment]", _host);
httpd_resp_sendstr_chunk(req, zw.c_str());
}
}
/*
if (_task.compare("test_analog") == 0)
{
std::string _host = "";
@@ -457,7 +517,9 @@ esp_err_t handler_editflow(httpd_req_t *req)
// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str());
std::string zw = tfliteflow.doSingleStep("[Analog]", _host);
httpd_resp_sendstr_chunk(req, zw.c_str());
}
}
*/
/*
if (_task.compare("test_digits") == 0)
{
std::string _host = "";
@@ -470,6 +532,7 @@ esp_err_t handler_editflow(httpd_req_t *req)
std::string zw = tfliteflow.doSingleStep("[Digits]", _host);
httpd_resp_sendstr_chunk(req, zw.c_str());
}
*/
/* Respond with an empty chunk to signal HTTP response completion */
@@ -483,6 +546,36 @@ esp_err_t handler_editflow(httpd_req_t *req)
};
esp_err_t handler_statusflow(httpd_req_t *req)
{
#ifdef DEBUG_DETAIL_ON
LogFile.WriteHeapInfo("handler_prevalue - Start");
#endif
const char* resp_str;
string zw;
#ifdef DEBUG_DETAIL_ON
printf("handler_prevalue:\n"); printf(req->uri); printf("\n");
#endif
zw = tfliteflow.getActStatus();
resp_str = zw.c_str();
httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
httpd_resp_send(req, resp_str, strlen(resp_str));
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
#ifdef DEBUG_DETAIL_ON
LogFile.WriteHeapInfo("handler_prevalue - Start");
#endif
return ESP_OK;
};
esp_err_t handler_prevalue(httpd_req_t *req)
{
#ifdef DEBUG_DETAIL_ON
@@ -498,6 +591,7 @@ esp_err_t handler_prevalue(httpd_req_t *req)
char _query[100];
char _size[10] = "";
char _numbers[50] = "default";
if (httpd_req_get_url_query_str(req, _query, 100) == ESP_OK)
{
@@ -511,15 +605,24 @@ esp_err_t handler_prevalue(httpd_req_t *req)
printf("Value: "); printf(_size); printf("\n");
#endif
}
httpd_query_key_value(_query, "numbers", _numbers, 50);
}
if (strlen(_size) == 0)
zw = tfliteflow.GetPrevalue();
{
zw = tfliteflow.GetPrevalue(std::string(_numbers));
}
else
zw = "SetPrevalue to " + tfliteflow.UpdatePrevalue(_size);
{
zw = "SetPrevalue to " + tfliteflow.UpdatePrevalue(_size, _numbers, true);
}
resp_str = zw.c_str();
httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");
httpd_resp_send(req, resp_str, strlen(resp_str));
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
@@ -535,17 +638,17 @@ void task_autodoFlow(void *pvParameter)
{
int64_t fr_start, fr_delta_ms;
printf("task_autodoFlow: start\r\n");
doInit();
auto_isrunning = tfliteflow.isAutoStart(auto_intervall);
gpio_handler_init();
auto_isrunning = tfliteflow.isAutoStart(auto_intervall);
if (isSetupModusActive()) {
auto_isrunning = false;
std::string zw_time = gettimestring(LOGFILE_TIME_FORMAT);
tfliteflow.doFlowMakeImageOnly(zw_time);
}
while (auto_isrunning)
{
std::string _zw = "task_autodoFlow - next round - Round #" + std::to_string(++countRounds);
@@ -590,6 +693,7 @@ void task_autodoFlow(void *pvParameter)
}
vTaskDelete(NULL); //Delete this task if it exits from the loop above
xHandletask_autodoFlow = NULL;
printf("task_autodoFlow: end\r\n");
}
void TFliteDoAutoStart()
@@ -597,6 +701,11 @@ void TFliteDoAutoStart()
xTaskCreate(&task_autodoFlow, "task_autodoFlow", configMINIMAL_STACK_SIZE * 64, NULL, tskIDLE_PRIORITY+1, &xHandletask_autodoFlow);
}
std::string GetMQTTMainTopic()
{
return tfliteflow.GetMQTTMainTopic();
}
void register_server_tflite_uri(httpd_handle_t server)
@@ -621,6 +730,10 @@ void register_server_tflite_uri(httpd_handle_t server)
camuri.user_ctx = (void*) "Light Off";
httpd_register_uri_handler(server, &camuri);
camuri.uri = "/statusflow.html";
camuri.handler = handler_statusflow;
camuri.user_ctx = (void*) "Light Off";
httpd_register_uri_handler(server, &camuri);
camuri.uri = "/editflow.html";
camuri.handler = handler_editflow;

View File

@@ -1,12 +1,11 @@
#include <esp_log.h>
#include <string>
#include <esp_http_server.h>
#include "CImageBasis.h"
//#include "ClassControllCamera.h"
static const char *TAGTFLITE = "server_tflite";
void register_server_tflite_uri(httpd_handle_t server);
void KillTFliteTasks();
@@ -15,6 +14,8 @@ void TFliteDoAutoStart();
bool isSetupModusActive();
std::string GetMQTTMainTopic();
esp_err_t GetJPG(std::string _filename, httpd_req_t *req);
esp_err_t GetRawJPG(httpd_req_t *req);

View File

@@ -18,6 +18,7 @@
static const char *TAG = "sntp";
bool setTimeAlwaysOnReboot = true;
time_t bootTime;
static void obtain_time(void);
static void initialize_sntp(void);
@@ -27,6 +28,17 @@ void time_sync_notification_cb(struct timeval *tv)
ESP_LOGI(TAG, "Notification of a time synchronization event");
}
std::string ConvertTimeToString(time_t _time, const char * frm)
{
struct tm timeinfo;
char strftime_buf[64];
localtime_r(&_time, &timeinfo);
strftime(strftime_buf, sizeof(strftime_buf), frm, &timeinfo);
std::string result(strftime_buf);
return result;
}
std::string gettimestring(const char * frm)
{
time_t now;
@@ -114,4 +126,17 @@ static void initialize_sntp(void)
sntp_setservername(0, "pool.ntp.org");
// sntp_set_time_sync_notification_cb(time_sync_notification_cb);
sntp_init();
}
void setBootTime()
{
time(&bootTime);
}
time_t getUpTime()
{
time_t now;
time(&now);
return now - bootTime;
}
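A hedged usage sketch for the new uptime helpers; where setBootTime() is called during startup is not shown in this diff, and the wrapper function is invented:
void logUptimeExample()
{
    time_t up = getUpTime(); // seconds elapsed since setBootTime() was called at boot
    ESP_LOGI(TAG, "up for %ld s", (long) up);
}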

View File

@@ -15,5 +15,10 @@
void setup_time(void);
std::string gettimestring(const char * frm);
std::string ConvertTimeToString(time_t _time, const char * frm);
void setTimeZone(std::string _tzstring);
void reset_servername(std::string _servername);
void reset_servername(std::string _servername);
void setBootTime();
time_t getUpTime();

View File

@@ -0,0 +1,7 @@
FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES nvs_flash jomjol_helper)

View File

@@ -0,0 +1,289 @@
#include "connect_wlan.h"
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"
#include "driver/gpio.h"
#include "esp_system.h"
#include "esp_wifi.h"
#include "esp_event.h"
#include "esp_log.h"
#include "nvs_flash.h"
#include "lwip/err.h"
#include "lwip/sys.h"
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <iostream>
#define EXAMPLE_ESP_MAXIMUM_RETRY 1000
/* FreeRTOS event group to signal when we are connected*/
static EventGroupHandle_t s_wifi_event_group;
/* The event group allows multiple bits for each event, but we only care about two events:
* - we are connected to the AP with an IP
* - we failed to connect after the maximum amount of retries */
#define WIFI_CONNECTED_BIT BIT0
#define WIFI_FAIL_BIT BIT1
static const char *TAG = "wifi station";
static int s_retry_num = 0;
///////////////////////////////////////////////////////////
#define BLINK_GPIO GPIO_NUM_33
int BlinkDauer;
int BlinkAnzahl;
bool BlinkOff;
bool BlinkIsRunning = false;
std::string hostname = "";
std::string std_hostname = "watermeter";
std::string ipadress = "";
std::string ssid = "";
std::string getIPAddress()
{
return ipadress;
}
std::string getSSID()
{
return ssid;
}
void task_doBlink(void *pvParameter)
{
ESP_LOGI("BLINK", "Blinken - start");
while (BlinkIsRunning)
{
// ESP_LOGI("BLINK", "Blinken - wait");
vTaskDelay(100 / portTICK_PERIOD_MS);
}
BlinkIsRunning = true;
// Init the GPIO
gpio_pad_select_gpio(BLINK_GPIO);
/* Set the GPIO as a push/pull output */
gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT);
for (int i = 0; i < BlinkAnzahl; ++i)
{
if (BlinkAnzahl > 1)
{
gpio_set_level(BLINK_GPIO, 1);
vTaskDelay(BlinkDauer / portTICK_PERIOD_MS);
}
gpio_set_level(BLINK_GPIO, 0);
vTaskDelay(BlinkDauer / portTICK_PERIOD_MS);
}
if (BlinkOff)
gpio_set_level(BLINK_GPIO, 1);
ESP_LOGI("BLINK", "Blinken - done");
BlinkIsRunning = false;
vTaskDelete(NULL); //Delete this task if it exits from the loop above
}
void LEDBlinkTask(int _dauer, int _anz, bool _off)
{
BlinkDauer = _dauer;
BlinkAnzahl = _anz;
BlinkOff = _off;
xTaskCreate(&task_doBlink, "task_doBlink", configMINIMAL_STACK_SIZE * 8, NULL, tskIDLE_PRIORITY+1, NULL);
}
/////////////////////////////////////////////////////////
static void event_handler(void* arg, esp_event_base_t event_base,
int32_t event_id, void* event_data)
{
if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_START) {
LEDBlinkTask(200, 1, true);
esp_wifi_connect();
} else if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_DISCONNECTED) {
// if (s_retry_num < EXAMPLE_ESP_MAXIMUM_RETRY){
esp_wifi_connect();
s_retry_num++;
ESP_LOGI(TAG, "retry to connect to the AP");
ESP_LOGI(TAG,"connect to the AP fail");
} else if (event_base == IP_EVENT && event_id == IP_EVENT_STA_GOT_IP) {
ip_event_got_ip_t* event = (ip_event_got_ip_t*) event_data;
ESP_LOGI(TAG, "got ip:" IPSTR, IP2STR(&event->ip_info.ip));
ipadress = std::string(ip4addr_ntoa((const ip4_addr*) &event->ip_info.ip));
s_retry_num = 0;
xEventGroupSetBits(s_wifi_event_group, WIFI_CONNECTED_BIT);
LEDBlinkTask(1000, 5, true);
}
}
void strinttoip4(const char *ip, int &a, int &b, int &c, int &d) {
std::string zw = std::string(ip);
std::stringstream s(zw);
char ch; //to temporarily store the '.'
s >> a >> ch >> b >> ch >> c >> ch >> d;
}
void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname, const char *_ipadr, const char *_gw, const char *_netmask, const char *_dns)
{
ssid = std::string(_ssid);
s_wifi_event_group = xEventGroupCreate();
ESP_ERROR_CHECK(esp_netif_init());
ESP_ERROR_CHECK(esp_event_loop_create_default());
/////////////////////////////////////////////////////////////////
esp_netif_t *my_sta = esp_netif_create_default_wifi_sta();
if ((_ipadr != NULL) && (_gw != NULL) && (_netmask != NULL))
{
ESP_LOGI(TAG, "set IP %s, GW %s, Netmask %s manual", _ipadr, _gw, _netmask);
esp_netif_dhcpc_stop(my_sta);
esp_netif_ip_info_t ip_info;
int a, b, c, d;
strinttoip4(_ipadr, a, b, c, d);
IP4_ADDR(&ip_info.ip, a, b, c, d);
strinttoip4(_gw, a, b, c, d);
IP4_ADDR(&ip_info.gw, a, b, c, d);
strinttoip4(_netmask, a, b, c, d);
IP4_ADDR(&ip_info.netmask, a, b, c, d);
esp_netif_set_ip_info(my_sta, &ip_info);
}
wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
ESP_ERROR_CHECK(esp_wifi_init(&cfg));
if ((_ipadr != NULL) && (_gw != NULL) && (_netmask != NULL))
{
if (_dns == NULL)
_dns = _gw;
ESP_LOGI(TAG, "set DNS manual");
esp_netif_dns_info_t dns_info;
ip4_addr_t ip;
ip.addr = esp_ip4addr_aton(_dns);
ip_addr_set_ip4_u32(&dns_info.ip, ip.addr);
ESP_ERROR_CHECK(esp_netif_set_dns_info(my_sta, ESP_NETIF_DNS_MAIN, &dns_info));
}
/////////////////////////////////////////////////////////////////
// esp_netif_create_default_wifi_sta();
// wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
// ESP_ERROR_CHECK(esp_wifi_init(&cfg));
/*
////////////////////////////// esp-idf 4.2 //////////////////////////
esp_event_handler_instance_t instance_any_id;
esp_event_handler_instance_t instance_got_ip;
ESP_ERROR_CHECK(esp_event_handler_instance_register(WIFI_EVENT,
ESP_EVENT_ANY_ID,
&event_handler,
NULL,
&instance_any_id));
ESP_ERROR_CHECK(esp_event_handler_instance_register(IP_EVENT,
IP_EVENT_STA_GOT_IP,
&event_handler,
NULL,
&instance_got_ip));
////////////////////////// ENDE esp-idf 4.2 ///////////////////////////
*/
ESP_ERROR_CHECK(esp_event_handler_register(WIFI_EVENT, ESP_EVENT_ANY_ID, &event_handler, NULL));
ESP_ERROR_CHECK(esp_event_handler_register(IP_EVENT, IP_EVENT_STA_GOT_IP, &event_handler, NULL));
wifi_config_t wifi_config = { };
strcpy((char*)wifi_config.sta.ssid, (const char*)_ssid);
strcpy((char*)wifi_config.sta.password, (const char*)_password);
ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) );
ESP_ERROR_CHECK(esp_wifi_set_config(WIFI_IF_STA, &wifi_config) );
ESP_ERROR_CHECK(esp_wifi_start() );
if (_hostname != NULL)
{
esp_err_t ret = tcpip_adapter_set_hostname(TCPIP_ADAPTER_IF_STA , _hostname);
hostname = std::string(_hostname);
if(ret != ESP_OK ){
ESP_LOGE(TAG,"failed to set hostname:%d",ret);
}
}
ESP_LOGI(TAG, "wifi_init_sta finished.");
/* Waiting until either the connection is established (WIFI_CONNECTED_BIT) or connection failed for the maximum
* number of re-tries (WIFI_FAIL_BIT). The bits are set by event_handler() (see above) */
EventBits_t bits = xEventGroupWaitBits(s_wifi_event_group,
WIFI_CONNECTED_BIT | WIFI_FAIL_BIT,
pdFALSE,
pdFALSE,
portMAX_DELAY);
/* xEventGroupWaitBits() returns the bits before the call returned, hence we can test which event actually
* happened. */
if (bits & WIFI_CONNECTED_BIT) {
ESP_LOGI(TAG, "connected to ap SSID:%s password:%s",
_ssid, _password);
} else if (bits & WIFI_FAIL_BIT) {
ESP_LOGI(TAG, "Failed to connect to SSID:%s, password:%s",
_ssid, _password);
} else {
ESP_LOGE(TAG, "UNEXPECTED EVENT");
}
/* The event will not be processed after unregister */
/*
////////////////////////////// esp-idf 4.2 //////////////////////////
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, instance_got_ip));
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, instance_any_id));
////////////////////////// END esp-idf 4.2 ///////////////////////////
*/
/* Disabled so that the connection is re-established after a connection drop
ESP_ERROR_CHECK(esp_event_handler_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, &event_handler));
ESP_ERROR_CHECK(esp_event_handler_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, &event_handler));
vEventGroupDelete(s_wifi_event_group);
*/
/*
while (BlinkIsRunning)
{
vTaskDelay(100 / portTICK_PERIOD_MS);
}
*/
}
void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname)
{
wifi_init_sta(_ssid, _password, _hostname, NULL, NULL, NULL, NULL);
}
void wifi_init_sta(const char *_ssid, const char *_password)
{
wifi_init_sta(_ssid, _password, NULL, NULL, NULL, NULL, NULL);
}
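For orientation, a minimal usage sketch (SSID, password and addresses are placeholders, not values taken from this changeset): the shorter overloads forward NULL for the network parameters, which leaves DHCP active, while the full overload applies a static configuration and falls back to the gateway address when no DNS server is given.
wifi_init_sta("MyWLAN", "secret");                        // DHCP, default hostname
wifi_init_sta("MyWLAN", "secret", "watermeter");          // DHCP, custom hostname
wifi_init_sta("MyWLAN", "secret", "watermeter",           // static IP; DNS falls back to the gateway
              "192.168.1.50", "192.168.1.1", "255.255.255.0", NULL);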

View File

@@ -0,0 +1,16 @@
#ifndef CONNECT_WLAN_H
#define CONNECT_WLAN_H
#include <string>
void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname, const char *_ipadr, const char *_gw, const char *_netmask, const char *_dns);
void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname);
void wifi_init_sta(const char *_ssid, const char *_password);
std::string getIPAddress();
std::string getSSID();
extern std::string hostname;
extern std::string std_hostname;
#endif

View File

@@ -0,0 +1,257 @@
#include "read_wlanini.h"
#include "Helper.h"
#include "connect_wlan.h"
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <iostream>
#include <string.h>
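// Splits a line at the given delimiter characters (default: " =,") and trims each token; "ZerlegeZeile" is German for "split line".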
std::vector<string> ZerlegeZeile(std::string input, std::string _delimiter = "")
{
std::vector<string> Output;
std::string delimiter = " =,";
if (_delimiter.length() > 0){
delimiter = _delimiter;
}
input = trim(input, delimiter);
size_t pos = findDelimiterPos(input, delimiter);
std::string token;
while (pos != std::string::npos) {
token = input.substr(0, pos);
token = trim(token, delimiter);
Output.push_back(token);
input.erase(0, pos + 1);
input = trim(input, delimiter);
pos = findDelimiterPos(input, delimiter);
}
Output.push_back(input);
return Output;
}
void LoadWlanFromFile(std::string fn, char *&_ssid, char *&_password, char *&_hostname, char *&_ipadr, char *&_gw, char *&_netmask, char *&_dns)
{
std::string ssid = "";
std::string passphrase = "";
std::string ipaddress = "";
std::string gw = "";
std::string netmask = "";
std::string dns = "";
std::string line = "";
std::vector<string> zerlegt;
hostname = std_hostname;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
// printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
for (int i = 2; i < zerlegt.size(); ++i)
zerlegt[1] = zerlegt[1] + "=" + zerlegt[i];
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
hostname = trim(zerlegt[1]);
if ((hostname[0] == '"') && (hostname[hostname.length()-1] == '"')){
hostname = hostname.substr(1, hostname.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){
ssid = trim(zerlegt[1]);
if ((ssid[0] == '"') && (ssid[ssid.length()-1] == '"')){
ssid = ssid.substr(1, ssid.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "PASSWORD")){
passphrase = zerlegt[1];
if ((passphrase[0] == '"') && (passphrase[passphrase.length()-1] == '"')){
passphrase = passphrase.substr(1, passphrase.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
ipaddress = zerlegt[1];
if ((ipaddress[0] == '"') && (ipaddress[ipaddress.length()-1] == '"')){
ipaddress = ipaddress.substr(1, ipaddress.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
gw = zerlegt[1];
if ((gw[0] == '"') && (gw[gw.length()-1] == '"')){
gw = gw.substr(1, gw.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
netmask = zerlegt[1];
if ((netmask[0] == '"') && (netmask[netmask.length()-1] == '"')){
netmask = netmask.substr(1, netmask.length()-2);
}
}
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
dns = zerlegt[1];
if ((dns[0] == '"') && (dns[dns.length()-1] == '"')){
dns = dns.substr(1, dns.length()-2);
}
}
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
fclose(pFile);
// If the hostname was empty in the .ini, fall back to std_hostname
if(hostname.length() == 0){
hostname = std_hostname;
}
_hostname = new char[hostname.length() + 1];
strcpy(_hostname, hostname.c_str());
_ssid = new char[ssid.length() + 1];
strcpy(_ssid, ssid.c_str());
_password = new char[passphrase.length() + 1];
strcpy(_password, passphrase.c_str());
if (ipaddress.length() > 0)
{
_ipadr = new char[ipaddress.length() + 1];
strcpy(_ipadr, ipaddress.c_str());
}
else
_ipadr = NULL;
if (gw.length() > 0)
{
_gw = new char[gw.length() + 1];
strcpy(_gw, gw.c_str());
}
else
_gw = NULL;
if (netmask.length() > 0)
{
_netmask = new char[netmask.length() + 1];
strcpy(_netmask, netmask.c_str());
}
else
_netmask = NULL;
if (dns.length() > 0)
{
_dns = new char[dns.length() + 1];
strcpy(_dns, dns.c_str());
}
else
_dns = NULL;
}
bool ChangeHostName(std::string fn, std::string _newhostname)
{
if (_newhostname == hostname)
return false;
string line = "";
std::vector<string> zerlegt;
bool found = false;
std::vector<string> neuesfile;
FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");
if (pFile == NULL)
return false;
char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);
while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
line = "hostname = \"" + _newhostname + "\"\n";
found = true;
}
neuesfile.push_back(line);
if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}
if (!found)
{
line = "\nhostname = \"" + _newhostname + "\"\n";
neuesfile.push_back(line);
}
fclose(pFile);
pFile = OpenFileAndWait(fn.c_str(), "w+");
for (int i = 0; i < neuesfile.size(); ++i)
{
printf("%s", neuesfile[i].c_str());
fputs(neuesfile[i].c_str(), pFile);
}
fclose(pFile);
printf("*** Update hostname done ***\n");
return true;
}
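For reference, a sketch of the wlan.ini format this parser accepts (all values are placeholders): keys are matched case-insensitively, surrounding double quotes are stripped, and ip/gateway/netmask/dns may be omitted to stay on DHCP.
ssid = "MyWLAN"
password = "secret"
hostname = "watermeter"
ip = "192.168.1.50"
gateway = "192.168.1.1"
netmask = "255.255.255.0"
dns = "192.168.1.1"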

View File

@@ -0,0 +1,11 @@
#ifndef READ_WLANINI_H
#define READ_WLANINI_H
#include <string>
void LoadWlanFromFile(std::string fn, char *&_ssid, char *&_password, char *&_hostname, char *&_ipadr, char *&_gw, char *&_netmask, char *&_dns);
bool ChangeHostName(std::string fn, std::string _newhostname);
#endif
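A minimal sketch of how the two modules fit together at startup (the file path and the wrapper function are assumptions for illustration, not part of this changeset): LoadWlanFromFile fills heap-allocated C strings, optional fields stay NULL, and the full wifi_init_sta overload then falls back to DHCP for anything that is missing.
#include "read_wlanini.h"
#include "connect_wlan.h"
void init_wifi_from_ini()
{
    char *ssid = NULL, *password = NULL, *host = NULL;
    char *ip = NULL, *gw = NULL, *netmask = NULL, *dns = NULL;
    LoadWlanFromFile("/sdcard/wlan.ini", ssid, password, host, ip, gw, netmask, dns);  // assumed path
    if (ssid != NULL)  // the loader returns early and leaves ssid NULL if the file cannot be opened
        wifi_init_sta(ssid, password, host, ip, gw, netmask, dns);
}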

BIN
code/components/tfmicro.zip Normal file

Binary file not shown.

View File

@@ -23,7 +23,7 @@ if(NOT DEFINED ENV{IDF_PATH})
endif()
idf_component_register(
SRCS tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/micro/testing/test_conv_model.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/activations.cc
SRCS tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/flatbuffer_utils.cc tensorflow/lite/micro/micro_graph.cc tensorflow/lite/micro/mock_micro_graph.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/system_setup.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc tensorflow/lite/micro/kernels/activations.cc tensorflow/lite/micro/kernels/activations_common.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/add_n.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/batch_to_space_nd.cc tensorflow/lite/micro/kernels/cast.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/conv_common.cc tensorflow/lite/micro/kernels/cumsum.cc tensorflow/lite/micro/kernels/depth_to_space.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/depthwise_conv_common.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/detection_postprocess.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/elu.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/exp.cc tensorflow/lite/micro/kernels/expand_dims.cc tensorflow/lite/micro/kernels/fill.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/floor_div.cc tensorflow/lite/micro/kernels/floor_mod.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/fully_connected_common.cc tensorflow/lite/micro/kernels/gather.cc tensorflow/lite/micro/kernels/gather_nd.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/hard_swish_common.cc tensorflow/lite/micro/kernels/if.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/l2_pool_2d.cc tensorflow/lite/micro/kernels/leaky_relu.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/logical_common.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/logistic_common.cc tensorflow/lite/micro/kernels/log_softmax.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/pooling.cc 
tensorflow/lite/micro/kernels/pooling_common.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/quantize_common.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/resize_bilinear.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/softmax_common.cc tensorflow/lite/micro/kernels/space_to_batch_nd.cc tensorflow/lite/micro/kernels/space_to_depth.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/squeeze.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/svdf_common.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/transpose.cc tensorflow/lite/micro/kernels/transpose_conv.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/zeros_like.cc
INCLUDE_DIRS . third_party/gemmlowp third_party/flatbuffers/include third_party/ruy)
# Reduce the level of paranoia to be able to compile TF sources
@@ -32,7 +32,7 @@ target_compile_options(${COMPONENT_LIB} PRIVATE
-Wno-missing-field-initializers
-Wno-type-limits)
target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter)
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter >)
target_compile_options(${COMPONENT_LIB} PRIVATE -Wimplicit-function-declaration -Werror -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP)
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -Werror -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP >)
target_compile_options(${COMPONENT_LIB} INTERFACE $<$<IN_LIST:-DTF_LITE_STATIC_MEMORY,$<TARGET_PROPERTY:${COMPONENT_LIB},COMPILE_OPTIONS>>:-DTF_LITE_STATIC_MEMORY>)
target_link_libraries(${COMPONENT_LIB} PRIVATE -lm)

View File

@@ -1,139 +0,0 @@
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_PUBLIC_VERSION_H_
#define TENSORFLOW_CORE_PUBLIC_VERSION_H_
// TensorFlow uses semantic versioning, see http://semver.org/.
// Also update tensorflow/tensorflow.bzl and
// tensorflow/tools/pip_package/setup.py
#define TF_MAJOR_VERSION 2
#define TF_MINOR_VERSION 5
#define TF_PATCH_VERSION 0
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
#define TF_VERSION_SUFFIX ""
#define TF_STR_HELPER(x) #x
#define TF_STR(x) TF_STR_HELPER(x)
// e.g. "0.5.0" or "0.6.0-alpha".
#define TF_VERSION_STRING \
(TF_STR(TF_MAJOR_VERSION) "." TF_STR(TF_MINOR_VERSION) "." TF_STR( \
TF_PATCH_VERSION) TF_VERSION_SUFFIX)
// GraphDef compatibility versions (the versions field in graph.proto).
//
// Each graph has producer and min_consumer versions, and each
// consumer has its own version and a min_producer. In addition, graphs can
// mark specific consumer versions as bad (to prevent bugs from executing).
// A consumer will execute a graph if the consumer's version is at least the
// graph's min_consumer, the graph's producer version is at least the consumer's
// min_producer, and the consumer version isn't specifically disallowed by the
// graph.
//
// By default, newly created graphs have producer version TF_GRAPH_DEF_VERSION
// min_consumer TF_GRAPH_DEF_MIN_CONSUMER, and no other bad consumer versions.
//
// Version history:
//
// 0. Graphs created before GraphDef versioning
// 1. First real version (2dec2015)
// 2. adjust_contrast only takes float, doesn't perform clamping (11dec2015)
// 3. Remove TileGrad, since it was equivalent to reduce_sum (30dec2015)
// 4. When support for this version is removed, we can safely make AttrValue
// parsing more strict with respect to empty list values (see
// 111635679, 7jan2016).
// 5. Graphs are wholly-validated during Session::Create() (7jan2016).
// 6. TensorFlow is scalar strict within Google (27jan2016).
// 7. Remove TopK in favor of TopKV2 (5feb2016).
// 8. Replace RandomCrop from C++ with pure Python (5feb2016).
// 9. Deprecate batch_norm_with_global_normalization (16feb2016).
// 10. Deprecate conv3d_backprop_{filter,input} (10jun2016).
// 11. Deprecate {batch}_self_adjoint_eig (3aug2016).
// 12. Graph consumers understand the node_def field of FunctionDef (22aug2016).
// 13. Deprecate multiple batch linear algebra ops (9sep2016).
// 14. Deprecate batch_matrix_* ops. (10sep2016).
// 15. Deprecate batch_fft_* ops. (14sep2016).
// 16. Deprecate tensor_array (v1) ops in favor of v2 (10nov2016).
// 17. Deprecate inv (11nov2016).
// 17. Expose reverse_v2 (10nov2016)
// 18. Add VariableV2 (30nov2016)
// 19. Deprecated ops created by models moved out of core SkipGram, NegTrain.
// (08dec2016)
// 20. Catch all version 1.0 changes to Python API generation. SplitV is now
// used for tf.split, ReverseV2 is now used by tf.reverse, ConcatV2 is
// now used by tf.concat. Graphs use flooring
// division and mod semantics. TensorArrayV3. (12dec2016)
// Also considered the version for when it is required for reduction
// ops' indices to be scalar or vector, and not higher rank.
// Some earlier graph def versions allowed this.
// 21. Dropped FunctionDef.Node support, switched to node_def introduced
// in version 12. (11jan2017)
// 22. Placeholder now can specify and enforce scalar and partial
// shapes, particularly when restoring a graph from GraphDef
// produced at version 22 or later. (04/10/2016)
// 23. Remove NonMaxSuppression in favor of NonMaxSuppressionV2.
// 24. Deprecate lookup ops (v1) ops in favor of v2 (30may2017)
// 25. Deprecate stack (v1) ops in favor of v2 (2017/6/15).
// 25. Deprecate RandomPoisson (v1) ops in favor of v2 (2017/10/25).
// 26. Add a bool 'stripped_default_attrs' to MetaInfoDef indicating
// whether default-valued attrs have been stripped from the nodes in the
// GraphDef. (7dec2017)
// 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops
// deprecated in favor of V2 ops. (2018/01/23)
// 28. Deprecate MatrixExponential op in favor of Python implementation.
// (2018/08/21).
// (2019/02/15). Added `control_ret` field to FunctionDef proto, and
// `control_output` field to OpDef proto.
// 29. Deprecate StatefulStandardNormal op in favor of StatefulStandardNormalV2.
// (2019/03/25).
// (2019/04/17). Added `arg_attr` field to FunctionDefProto.
// 30. (2019/05/09) First date based GraphDef version. GraphDef
// versions advance by 1 each day after this point.
#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
#define TF_GRAPH_DEF_VERSION 578 // Updated: 2020/11/7
// Checkpoint compatibility versions (the versions field in SavedSliceMeta).
//
// The checkpoint versions have the same semantics as GraphDef versions, but the
// numbering scheme is separate. We have no plans to ever deprecate checkpoint
// versions, but it's good to have this in place in case we ever need to.
//
// Version history:
//
// 0. Checkpoints saved before checkpoint versioning.
// 1. First real version (10feb2015).
#define TF_CHECKPOINT_VERSION_MIN_PRODUCER 0
#define TF_CHECKPOINT_VERSION_MIN_CONSUMER 0
#define TF_CHECKPOINT_VERSION 1
/// Version query functions (defined in generated version_info.cc)
// Host compiler version (declared elsewhere to be __VERSION__)
extern const char* tf_compiler_version();
// The git commit designator when tensorflow was built
// If no git repository, this will be "internal".
extern const char* tf_git_version();
// Value of the _GLIBCXX_USE_CXX11_ABI flag, or 0 if it's not set.
extern int tf_cxx11_abi_flag();
// Returns 1 if build is monolithic, or 0 otherwise.
extern int tf_monolithic_build();
#endif // TENSORFLOW_CORE_PUBLIC_VERSION_H_

View File

@@ -63,13 +63,11 @@ typedef struct {
} TfLiteMirrorPaddingParams;
// Possible fused activation functions.
// TODO(aselle): rename to TfLiteActivation
typedef enum {
kTfLiteActNone = 0,
kTfLiteActRelu,
kTfLiteActReluN1To1, // min(max(-1, x), 1)
kTfLiteActRelu1 = kTfLiteActReluN1To1, // kTfLiteActRelu1 will be deprecated.
kTfLiteActRelu6, // min(max(0, x), 6)
kTfLiteActReluN1To1, // min(max(-1, x), 1)
kTfLiteActRelu6, // min(max(0, x), 6)
kTfLiteActTanh,
kTfLiteActSignBit,
kTfLiteActSigmoid,
@@ -88,6 +86,19 @@ typedef struct {
int dilation_height_factor;
} TfLiteConvParams;
typedef struct {
TfLitePadding padding;
int stride_width;
int stride_height;
int stride_depth;
int dilation_width_factor;
int dilation_height_factor;
int dilation_depth_factor;
TfLiteFusedActivation activation;
} TfLiteConv3DParams;
typedef TfLiteConv3DParams TfLiteConv3DTransposeParams;
typedef struct {
TfLitePadding padding;
int stride_width;
@@ -214,6 +225,10 @@ typedef struct {
typedef struct {
bool adj_x;
bool adj_y;
// Parameters for BatchMatMul version 4 or above.
// If set to true and the weights are quantized, then non constant inputs
// are quantized at evaluation time with asymmetric quantization.
bool asymmetric_quantize_inputs;
} TfLiteBatchMatMulParams;
typedef struct {
@@ -314,8 +329,9 @@ typedef struct {
} TfLitePadV2Params;
typedef struct {
// TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
// For now we will fix the maximum possible number of dimensions.
// These fields are only used in old models for backward compatibility.
// In the current implementation, we use the 2nd input of the op as the shape,
// and these fields are unused.
int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT];
int num_dimensions;
} TfLiteReshapeParams;
@@ -351,6 +367,7 @@ typedef struct {
typedef struct {
int axis;
int batch_dims;
} TfLiteGatherParams;
typedef struct {
@@ -474,6 +491,17 @@ typedef struct {
int init_subgraph_index;
} TfLiteCallOnceParams;
typedef struct {
int table_id;
TfLiteType key_dtype;
TfLiteType value_dtype;
} TfLiteHashtableParams;
typedef struct {
const char* container;
const char* shared_name;
} TfLiteVarHandleParams;
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

View File

@@ -0,0 +1,109 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file declares types used by the pure C inference API defined in c_api.h,
// some of which are also used in the C++ and C kernel and interpreter APIs.
#ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_
#define TENSORFLOW_LITE_C_C_API_TYPES_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
// library.
#ifdef SWIG
#define TFL_CAPI_EXPORT
#elif defined(TFL_STATIC_LIBRARY_BUILD)
#define TFL_CAPI_EXPORT
#else // not defined TFL_STATIC_LIBRARY_BUILD
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
typedef enum TfLiteStatus {
kTfLiteOk = 0,
// Generally referring to an error in the runtime (i.e. interpreter)
kTfLiteError = 1,
// Generally referring to an error from a TfLiteDelegate itself.
kTfLiteDelegateError = 2,
// Generally referring to an error in applying a delegate due to
// incompatibility between runtime and delegate, e.g., this error is returned
// when trying to apply a TfLite delegate onto a model graph that's already
// immutable.
kTfLiteApplicationError = 3,
// Generally referring to serialized delegate data not being found.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataNotFound = 4,
// Generally referring to data-writing issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataWriteError = 5,
// Generally referring to data-reading issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataReadError = 6,
} TfLiteStatus;
// Types supported by tensor
typedef enum {
kTfLiteNoType = 0,
kTfLiteFloat32 = 1,
kTfLiteInt32 = 2,
kTfLiteUInt8 = 3,
kTfLiteInt64 = 4,
kTfLiteString = 5,
kTfLiteBool = 6,
kTfLiteInt16 = 7,
kTfLiteComplex64 = 8,
kTfLiteInt8 = 9,
kTfLiteFloat16 = 10,
kTfLiteFloat64 = 11,
kTfLiteComplex128 = 12,
kTfLiteUInt64 = 13,
kTfLiteResource = 14,
kTfLiteVariant = 15,
kTfLiteUInt32 = 16,
} TfLiteType;
// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
// If per-layer quantization is specified this field will still be populated in
// addition to TfLiteAffineQuantization.
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
// real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteQuantizationParams {
float scale;
int32_t zero_point;
} TfLiteQuantizationParams;
#ifdef __cplusplus
} // extern C
#endif
#endif // TENSORFLOW_LITE_C_C_API_TYPES_H_
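As a worked example of the asymmetric quantization formula documented above (the numbers are made up for illustration), converting a stored quantized value back to float:
#include <stdio.h>
#include <stdint.h>
int main(void) {
    // Same fields as TfLiteQuantizationParams above, with assumed example values.
    float scale = 0.05f;
    int32_t zero_point = -10;
    int8_t quantized_value = 30;
    float real_value = scale * (quantized_value - zero_point);  // 0.05 * (30 - (-10)) = 2.0
    printf("real value = %f\n", real_value);
    return 0;
}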

View File

@@ -14,6 +14,8 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/c_api_types.h"
#ifndef TF_LITE_STATIC_MEMORY
#include <stdlib.h>
#include <string.h>
@@ -43,8 +45,10 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
#ifndef TF_LITE_STATIC_MEMORY
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
TfLiteIntArray* ret =
(TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size));
int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
if (alloc_size <= 0) return NULL;
TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
if (!ret) return ret;
ret->size = size;
return ret;
}
@@ -179,9 +183,9 @@ void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
}
// TODO(b/145340303): Tensor data should be aligned.
if (!tensor->data.raw) {
tensor->data.raw = malloc(num_bytes);
tensor->data.raw = (char*)malloc(num_bytes);
} else if (num_bytes > tensor->bytes) {
tensor->data.raw = realloc(tensor->data.raw, num_bytes);
tensor->data.raw = (char*)realloc(tensor->data.raw, num_bytes);
}
tensor->bytes = num_bytes;
}
@@ -197,12 +201,16 @@ const char* TfLiteTypeGetName(TfLiteType type) {
return "INT16";
case kTfLiteInt32:
return "INT32";
case kTfLiteUInt32:
return "UINT32";
case kTfLiteUInt8:
return "UINT8";
case kTfLiteInt8:
return "INT8";
case kTfLiteInt64:
return "INT64";
case kTfLiteUInt64:
return "UINT64";
case kTfLiteBool:
return "BOOL";
case kTfLiteComplex64:
@@ -215,11 +223,15 @@ const char* TfLiteTypeGetName(TfLiteType type) {
return "FLOAT16";
case kTfLiteFloat64:
return "FLOAT64";
case kTfLiteResource:
return "RESOURCE";
case kTfLiteVariant:
return "VARIANT";
}
return "Unknown type";
}
TfLiteDelegate TfLiteDelegateCreate() {
TfLiteDelegate TfLiteDelegateCreate(void) {
TfLiteDelegate d = {
.data_ = NULL,
.Prepare = NULL,

View File

@@ -40,26 +40,12 @@ limitations under the License.
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/c/c_api_types.h" // IWYU pragma: export
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
typedef enum TfLiteStatus {
kTfLiteOk = 0,
// Generally referring to an error in the runtime (i.e. interpreter)
kTfLiteError = 1,
// Generally referring to an error from a TfLiteDelegate itself.
kTfLiteDelegateError = 2,
// Generally referring to an error in applying a delegate due to
// incompatibility between runtime and delegate, e.g., this error is returned
// when trying to apply a TfLite delegate onto a model graph that's already
// immutable.
kTfLiteApplicationError = 3
} TfLiteStatus;
// The list of external context types known to TF Lite. This list exists solely
// to avoid conflicts and to ensure ops can share the external contexts they
// need. Access to the external contexts is controlled by one of the
@@ -80,7 +66,7 @@ struct TfLiteRegistration;
// An external context is a collection of information unrelated to the TF Lite
// framework, but useful to a subset of the ops. TF Lite knows very little
// about about the actual contexts, but it keeps a list of them, and is able to
// about the actual contexts, but it keeps a list of them, and is able to
// refresh them if configurations like the number of recommended threads
// change.
typedef struct TfLiteExternalContext {
@@ -98,7 +84,8 @@ typedef struct TfLiteIntArray {
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
__GNUC_MINOR__ >= 1) || \
defined(HEXAGON) || (__clang_major__ == 7 && __clang_minor__ == 1)
defined(HEXAGON) || \
(defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
int data[0];
#else
int data[];
@@ -254,22 +241,6 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
} \
} while (0)
// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
// library.
#ifdef SWIG
#define TFL_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
// Single-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex64 {
float re, im; // real and imaginary parts, respectively.
@@ -285,23 +256,6 @@ typedef struct TfLiteFloat16 {
uint16_t data;
} TfLiteFloat16;
// Types supported by tensor
typedef enum {
kTfLiteNoType = 0,
kTfLiteFloat32 = 1,
kTfLiteInt32 = 2,
kTfLiteUInt8 = 3,
kTfLiteInt64 = 4,
kTfLiteString = 5,
kTfLiteBool = 6,
kTfLiteInt16 = 7,
kTfLiteComplex64 = 8,
kTfLiteInt8 = 9,
kTfLiteFloat16 = 10,
kTfLiteFloat64 = 11,
kTfLiteComplex128 = 12,
} TfLiteType;
// Return the name of a given type, for error reporting purposes.
const char* TfLiteTypeGetName(TfLiteType type);
@@ -318,22 +272,12 @@ typedef enum TfLiteQuantizationType {
typedef struct TfLiteQuantization {
// The type of quantization held by params.
TfLiteQuantizationType type;
// Holds a reference to one of the quantization param structures specified
// below.
// Holds an optional reference to a quantization param structure. The actual
// type depends on the value of the `type` field (see the comment there for
// the values and corresponding types).
void* params;
} TfLiteQuantization;
// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
// If per-layer quantization is specified this field will still be populated in
// addition to TfLiteAffineQuantization.
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
// real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteQuantizationParams {
float scale;
int32_t zero_point;
} TfLiteQuantizationParams;
// Parameters for asymmetric quantization across a dimension (i.e per output
// channel quantization).
// quantized_dimension specifies which dimension the scales and zero_points
@@ -353,7 +297,9 @@ typedef union TfLitePtrUnion {
* GetTensorData<TYPE>(tensor) instead, otherwise only access .data, as other
* members are deprecated. */
int32_t* i32;
uint32_t* u32;
int64_t* i64;
uint64_t* u64;
float* f;
TfLiteFloat16* f16;
double* f64;
@@ -430,6 +376,17 @@ typedef struct TfLiteCustomAllocation {
size_t bytes;
} TfLiteCustomAllocation;
// The flags used in `Interpreter::SetCustomAllocationForTensor`.
// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc.
typedef enum TfLiteCustomAllocationFlags {
kTfLiteCustomAllocationFlagsNone = 0,
// Skips checking whether allocation.data points to an aligned buffer as
// expected by the TFLite runtime.
// NOTE: Setting this flag can cause crashes when calling Invoke().
// Use with caution.
kTfLiteCustomAllocationFlagsSkipAlignCheck = 1,
} TfLiteCustomAllocationFlags;
// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
#ifndef TF_LITE_STATIC_MEMORY
@@ -499,8 +456,8 @@ typedef struct TfLiteTensor {
} TfLiteTensor;
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
// This structure only exhibits the inputs, outputs, user defined data and some
// node properties (like statefulness), not other features like the type.
typedef struct TfLiteNode {
// Inputs to this node expressed as indices into the simulator's tensors.
TfLiteIntArray* inputs;
@@ -533,8 +490,11 @@ typedef struct TfLiteNode {
// created by calling `interpreter.ModifyGraphWithDelegate`.
// WARNING: This is an experimental interface that is subject to change.
struct TfLiteDelegate* delegate;
// Whether this op might have side effect (e.g. stateful op).
bool might_have_side_effect;
} TfLiteNode;
#else // defined(TF_LITE_STATIC_MEMORY)?
#else // defined(TF_LITE_STATIC_MEMORY)?
// NOTE: This flag is opt-in only at compile time.
//
// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
@@ -683,6 +643,7 @@ typedef struct TfLiteContext {
// TfLiteDelegates can traverse the current execution plan by iterating
// through each member of this array and using GetNodeAndRegistration() to
// access details about a node. i.e.
//
// TfLiteIntArray* execution_plan;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
// for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
@@ -691,6 +652,28 @@ typedef struct TfLiteContext {
// TfLiteRegistration* reg;
// context->GetNodeAndRegistration(context, node_index, &node, &reg);
// }
// Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime.
// Future calls to GetExecutionPlan invalidates earlier outputs. The following
// code snippet shows the issue of such an invocation pattern. After calling
// CheckNode, subsequent access to `plan_1st` is undefined.
//
// void CheckNode(const TfLiteNode* node) {
// ...
// TfLiteIntArray* plan_2nd;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd));
// ...
// }
//
// TfLiteIntArray* plan_1st;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
// for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
// int node_index = plan_1st->data[exec_index];
// TfLiteNode* node;
// TfLiteRegistration* reg;
// context->GetNodeAndRegistration(context, node_index, &node, &reg);
// CheckNode(node);
// }
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
TfLiteIntArray** execution_plan);
@@ -820,6 +803,18 @@ typedef struct TfLiteContext {
// WARNING: This method may not be available on all platforms.
TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
int tensor_idx);
// Retrieves named metadata buffer from the TFLite model.
// Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
// Model: that is, there exists a `metadata` entry with given `name` string.
// (see TFLite's schema.fbs).
// The corresponding `buffer` information is populated in `ptr` & `bytes`.
// The data from `ptr` is valid for the lifetime of the Interpreter.
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context,
const char* name, const char** ptr,
size_t* bytes);
} TfLiteContext;
typedef struct TfLiteRegistration {
@@ -961,7 +956,7 @@ typedef struct TfLiteDelegate {
// Build a 'null' delegate, with all the fields properly set to their default
// values.
TfLiteDelegate TfLiteDelegateCreate();
TfLiteDelegate TfLiteDelegateCreate(void);
#ifdef __cplusplus
} // extern "C"

View File

@@ -169,6 +169,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseAdd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD_N: {
return ParseAddN(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MAX: {
return ParseArgMax(op, error_reporter, allocator, builtin_data);
}
@@ -181,6 +185,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_MATMUL: {
return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_TO_SPACE_ND: {
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
@@ -193,6 +205,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CUMSUM: {
return ParseCumsum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTH_TO_SPACE: {
return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
}
@@ -201,14 +221,46 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseDequantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DIV: {
return ParseDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ELU: {
return ParseElu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXP: {
return ParseExp(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXPAND_DIMS: {
return ParseExpandDims(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FILL: {
return ParseFill(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR: {
return ParseFloor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_DIV: {
return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_MOD: {
return ParseFloorMod(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FULLY_CONNECTED: {
return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GATHER_ND: {
return ParseGatherNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER: {
return ParseGreater(op, error_reporter, allocator, builtin_data);
}
@@ -229,6 +281,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LEAKY_RELU: {
return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS: {
return ParseLess(op, error_reporter, allocator, builtin_data);
}
@@ -257,6 +313,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseLogistic(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG_SOFTMAX: {
return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAXIMUM: {
return ParseMaximum(op, error_reporter, allocator, builtin_data);
}
@@ -297,6 +357,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParsePadV2(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_POW: {
return ParsePow(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PRELU: {
return ParsePrelu(op, error_reporter, allocator, builtin_data);
}
@@ -309,6 +373,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ALL: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MAX: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
@@ -362,6 +430,14 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_BATCH_ND: {
return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_DEPTH: {
return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT: {
return ParseSplit(op, error_reporter, allocator, builtin_data);
}
@@ -378,6 +454,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseSquare(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUEEZE: {
return ParseSqueeze(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STRIDED_SLICE: {
return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
}
@@ -398,23 +478,20 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseTanh(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TRANSPOSE_CONV: {
return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_UNPACK: {
return ParseUnpack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ZEROS_LIKE: {
return ParseZerosLike(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CAST: {
auto params = safe_allocator.Allocate<TfLiteCastParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->in_data_type(),
&params->in_data_type,
error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
&params->out_data_type,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
return ParseCast(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LSH_PROJECTION: {
auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
@@ -483,16 +560,7 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_HASHTABLE_LOOKUP:
// no-op.
return kTfLiteOk;
case BuiltinOperator_DIV: {
auto params = safe_allocator.Allocate<TfLiteDivParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -584,66 +652,9 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_SPACE_TO_DEPTH: {
auto params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_SpaceToDepthOptions()) {
params->block_size = schema_params->block_size();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_DEPTH_TO_SPACE: {
auto params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_DepthToSpaceOptions()) {
params->block_size = schema_params->block_size();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GATHER: {
auto params = safe_allocator.Allocate<TfLiteGatherParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
params->axis = 0;
if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
params->axis = gather_params->axis();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_SQUEEZE: {
auto params = safe_allocator.Allocate<TfLiteSqueezeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) {
const auto* squeeze_dims = schema_params->squeeze_dims();
if (squeeze_dims != nullptr) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
error_reporter, "squeeze"));
params->num_squeeze_dims = squeeze_dims->size();
} else {
params->num_squeeze_dims = 0;
}
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_TRANSPOSE_CONV: {
auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* transpose_conv_params =
op->builtin_options_as_TransposeConvOptions()) {
params->padding = ConvertPadding(transpose_conv_params->padding());
params->stride_width = transpose_conv_params->stride_w();
params->stride_height = transpose_conv_params->stride_h();
}
*builtin_data = params.release();
return kTfLiteOk;
return ParseGather(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPARSE_TO_DENSE: {
auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
@@ -656,7 +667,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return kTfLiteOk;
}
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
TF_LITE_REPORT_ERROR(error_reporter,
"DELEGATE op shouldn't exist in model.");
return kTfLiteError;
@@ -683,16 +693,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_LEAKY_RELU: {
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* leaky_relu_params =
op->builtin_options_as_LeakyReluOptions()) {
params->alpha = leaky_relu_params->alpha();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_MIRROR_PAD: {
auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -750,17 +750,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BATCH_MATMUL: {
auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bmm_params =
op->builtin_options_as_BatchMatMulOptions()) {
params->adj_x = bmm_params->adj_x();
params->adj_y = bmm_params->adj_y();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CALL_ONCE: {
auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -771,50 +760,75 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CUMSUM: {
auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
case BuiltinOperator_CONV_3D:
case BuiltinOperator_CONV_3D_TRANSPOSE: {
auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
params->exclusive = cumsum_params->exclusive();
params->reverse = cumsum_params->reverse();
if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
params->padding = ConvertPadding(conv3d_params->padding());
params->activation =
ConvertActivation(conv3d_params->fused_activation_function());
params->stride_depth = conv3d_params->stride_d();
params->stride_height = conv3d_params->stride_h();
params->stride_width = conv3d_params->stride_w();
params->dilation_depth_factor = conv3d_params->dilation_d_factor();
params->dilation_height_factor = conv3d_params->dilation_h_factor();
params->dilation_width_factor = conv3d_params->dilation_w_factor();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE: {
auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* hashtable_params =
op->builtin_options_as_HashtableOptions()) {
params->table_id = hashtable_params->table_id();
TF_LITE_ENSURE_STATUS(ConvertTensorType(
hashtable_params->key_dtype(), &params->key_dtype, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
&params->value_dtype,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_VAR_HANDLE: {
auto params = safe_allocator.Allocate<TfLiteVarHandleParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
params->container = nullptr;
params->shared_name = nullptr;
if (const auto* var_handle_params =
op->builtin_options_as_VarHandleOptions()) {
if (var_handle_params->container())
params->container = var_handle_params->container()->c_str();
if (var_handle_params->shared_name())
params->shared_name = var_handle_params->shared_name()->c_str();
}
*builtin_data = params.release();
return kTfLiteOk;
}
// Below are the ops with no builtin_data structure.
case BuiltinOperator_BATCH_TO_SPACE_ND:
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
// ok for now, since there is no call implementation either.
case BuiltinOperator_CALL:
case BuiltinOperator_CONCAT_EMBEDDINGS:
case BuiltinOperator_COS:
case BuiltinOperator_CUSTOM:
case BuiltinOperator_ELU:
case BuiltinOperator_EMBEDDING_LOOKUP:
case BuiltinOperator_EQUAL:
case BuiltinOperator_EXP:
case BuiltinOperator_EXPAND_DIMS:
case BuiltinOperator_LOG_SOFTMAX:
case BuiltinOperator_MATRIX_DIAG:
case BuiltinOperator_MATRIX_SET_DIAG:
case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_SELECT:
case BuiltinOperator_SELECT_V2:
case BuiltinOperator_SLICE:
case BuiltinOperator_SPACE_TO_BATCH_ND:
case BuiltinOperator_TILE:
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_POW:
case BuiltinOperator_FLOOR_DIV:
case BuiltinOperator_ZEROS_LIKE:
case BuiltinOperator_FILL:
case BuiltinOperator_FLOOR_MOD:
case BuiltinOperator_RANGE:
case BuiltinOperator_SQUARED_DIFFERENCE:
case BuiltinOperator_REVERSE_V2:
case BuiltinOperator_ADD_N:
case BuiltinOperator_GATHER_ND:
case BuiltinOperator_WHERE:
case BuiltinOperator_RANK:
case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
@@ -823,6 +837,16 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_DENSIFY:
case BuiltinOperator_SEGMENT_SUM:
case BuiltinOperator_BROADCAST_TO:
case BuiltinOperator_RFFT2D:
case BuiltinOperator_IMAG:
case BuiltinOperator_REAL:
case BuiltinOperator_COMPLEX_ABS:
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
case BuiltinOperator_READ_VARIABLE:
case BuiltinOperator_ASSIGN_VARIABLE:
case BuiltinOperator_BROADCAST_ARGS:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
return kTfLiteError;
@@ -850,6 +874,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
case TensorType_INT32:
*type = kTfLiteInt32;
return kTfLiteOk;
case TensorType_UINT32:
*type = kTfLiteUInt32;
return kTfLiteOk;
case TensorType_UINT8:
*type = kTfLiteUInt8;
return kTfLiteOk;
@@ -859,6 +886,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
case TensorType_INT64:
*type = kTfLiteInt64;
return kTfLiteOk;
case TensorType_UINT64:
*type = kTfLiteUInt64;
return kTfLiteOk;
case TensorType_STRING:
*type = kTfLiteString;
return kTfLiteOk;
@@ -871,6 +901,12 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
case TensorType_COMPLEX128:
*type = kTfLiteComplex128;
return kTfLiteOk;
case TensorType_RESOURCE:
*type = kTfLiteResource;
return kTfLiteOk;
case TensorType_VARIANT:
*type = kTfLiteVariant;
return kTfLiteOk;
default:
*type = kTfLiteNoType;
TF_LITE_REPORT_ERROR(error_reporter,
@@ -912,6 +948,11 @@ TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
return kTfLiteOk;
}
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
@@ -962,6 +1003,56 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
params->adj_x = bmm_params->adj_x();
params->adj_y = bmm_params->adj_y();
params->asymmetric_quantize_inputs =
bmm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
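// Illustrative sketch (not code from this diff): the reason each builtin gets
// its own ParseXxx function is that selective registration can reference only
// the parsers a model actually needs, so unused ones are dropped by the
// linker. The alias and table below are assumptions for illustration only.
using BuiltinParseFunction = TfLiteStatus (*)(const Operator* op,
                                              ErrorReporter* error_reporter,
                                              BuiltinDataAllocator* allocator,
                                              void** builtin_data);
struct HypotheticalParserEntry {
  BuiltinOperator op;
  BuiltinParseFunction parser;
};
// Only the parse functions named here end up referenced in the final binary.
constexpr HypotheticalParserEntry kHypotheticalParsers[] = {
    {BuiltinOperator_BATCH_MATMUL, ParseBatchMatMul},
    {BuiltinOperator_CAST, ParseCast},
};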
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteCastParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
TF_LITE_ENSURE_STATUS(ConvertTensorType(
schema_params->in_data_type(), &params->in_data_type, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
&params->out_data_type,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1030,6 +1121,24 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
params->exclusive = cumsum_params->exclusive();
params->reverse = cumsum_params->reverse();
}
*builtin_data = params.release();
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1038,6 +1147,31 @@ TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}
TfLiteStatus ParseDepthToSpace(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteDepthToSpaceParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
if (schema_params != nullptr) {
params->block_size = schema_params->block_size();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better understand the ramifications of changing the legacy behavior.
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
@@ -1082,6 +1216,29 @@ TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
return kTfLiteOk;
}
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteDivParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
params->activation =
ConvertActivation(schema_params->fused_activation_function());
}
*builtin_data = params.release();
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1090,6 +1247,30 @@ TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1098,6 +1279,22 @@ TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseFullyConnected(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
@@ -1144,6 +1341,35 @@ TfLiteStatus ParseFullyConnected(const Operator* op,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteGatherParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
params->axis = 0;
params->batch_dims = 0;
if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
params->axis = gather_params->axis();
params->batch_dims = gather_params->batch_dims();
}
*builtin_data = params.release();
return kTfLiteOk;
}
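// Illustrative consumer side (the function name is an assumption, not from
// this diff): once ParseGather() has released the params into builtin_data, a
// kernel reads them back through node->builtin_data during Prepare/Eval.
TfLiteStatus GatherPrepareSketch(TfLiteContext* /*context*/, TfLiteNode* node) {
  const auto* params =
      reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data);
  if (params == nullptr) return kTfLiteError;
  // params->axis and params->batch_dims carry the flatbuffer options, or the
  // defaults of 0 that ParseGather() sets when the options table is absent.
  return kTfLiteOk;
}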
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1168,6 +1394,30 @@ TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
return kTfLiteOk;
}
TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const IfOptions* schema_params = op->builtin_options_as_IfOptions();
if (schema_params != nullptr) {
params->then_subgraph_index = schema_params->then_subgraph_index();
params->else_subgraph_index = schema_params->else_subgraph_index();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better understand the ramifications of changing the legacy behavior.
}
*builtin_data = params.release();
return kTfLiteOk;
}
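// Illustrative consumer side (the function name is an assumption, not from
// this diff): the two indices stored by ParseIf() select which subgraphs of
// the flatbuffer model the IF kernel invokes for the true and false branches.
inline int IfBranchSubgraphSketch(const TfLiteIfParams& params,
                                  bool condition) {
  return condition ? params.then_subgraph_index : params.else_subgraph_index;
}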
TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
@@ -1195,6 +1445,22 @@ TfLiteStatus ParseL2Normalization(const Operator* op,
return kTfLiteOk;
}
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* leaky_relu_params =
op->builtin_options_as_LeakyReluOptions()) {
params->alpha = leaky_relu_params->alpha();
}
*builtin_data = params.release();
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1251,6 +1517,14 @@ TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1378,6 +1652,14 @@ TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1599,6 +1881,39 @@ TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseSpaceToDepth(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSpaceToDepthParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
if (schema_params != nullptr) {
params->block_size = schema_params->block_size();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better understand the ramifications of changing the legacy behavior.
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
@@ -1647,6 +1962,39 @@ TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteSqueezeParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteSqueezeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();
if (schema_params != nullptr) {
const auto* squeeze_dims = schema_params->squeeze_dims();
if (squeeze_dims != nullptr) {
TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
error_reporter, "squeeze"));
params->num_squeeze_dims = squeeze_dims->size();
} else {
params->num_squeeze_dims = 0;
}
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better understand the ramifications of changing the legacy behavior.
}
*builtin_data = params.release();
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1753,6 +2101,40 @@ TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
void**) {
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseTransposeConv(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteTransposeConvParams,
SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const TransposeConvOptions* transpose_conv_params =
op->builtin_options_as_TransposeConvOptions();
if (transpose_conv_params != nullptr) {
params->padding = ConvertPadding(transpose_conv_params->padding());
params->stride_width = transpose_conv_params->stride_w();
params->stride_height = transpose_conv_params->stride_h();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better understand the ramifications of changing the legacy behavior.
}
*builtin_data = params.release();
return kTfLiteOk;
}
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
@@ -1779,6 +2161,14 @@ TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
BuiltinDataAllocator*, void**) {
return kTfLiteOk;
}
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {

View File

@@ -75,15 +75,30 @@ TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBatchToSpaceNd(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseConcatenation(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
@@ -95,6 +110,14 @@ TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseDepthToSpace(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
@@ -104,17 +127,48 @@ TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseElu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseExpandDims(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFullyConnected(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseGatherNd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -127,11 +181,18 @@ TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -158,6 +219,10 @@ TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -186,6 +251,9 @@ TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePow(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -230,12 +298,25 @@ TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSpaceToBatchNd(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSpaceToDepth(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -256,9 +337,22 @@ TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseTranspose(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseTransposeConv(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_

View File

@@ -30,8 +30,7 @@ TfLiteStatus GetRegistrationFromOpCode(
auto builtin_code = GetBuiltinCode(opcode);
int version = opcode->version();
if (builtin_code > BuiltinOperator_MAX ||
builtin_code < BuiltinOperator_MIN) {
if (builtin_code > BuiltinOperator_MAX) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Op builtin_code out of range: %d. Are you using old TFLite binary "
@@ -43,7 +42,9 @@ TfLiteStatus GetRegistrationFromOpCode(
if (*registration == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Didn't find op for builtin opcode '%s' version '%d'\n",
"Didn't find op for builtin opcode '%s' version '%d'. "
"An older version of this builtin might be supported. "
"Are you using an old TFLite binary with a newer model?\n",
EnumNameBuiltinOperator(builtin_code), version);
status = kTfLiteError;
}

View File

@@ -15,6 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#include <memory>
#include <vector>
#include "tensorflow/lite/c/common.h"
@@ -45,6 +46,22 @@ class OpResolver {
}
virtual ~OpResolver() {}
private:
/// Returns true if this OpResolver may contain any "user defined" ops.
/// By "user defined" ops, we mean any op definitions other than those
/// contained in tflite::ops::builtin::BuiltinOpResolver.
///
/// If this method returns true, it doesn't necessarily mean that the
/// OpResolver contains a user-defined op, just that the absence of
/// user-defined ops can't be guaranteed.
///
/// Note that "user-defined" ops are not the same as "custom" ops;
/// BuiltinOpResolver may support certain "custom" ops, in addition to
/// "builtin" ops, and may not support all of the "builtin" op enum values.
virtual bool MayContainUserDefinedOps() const { return true; }
friend class OpResolverInternal;
};
// Handles the logic for converting between an OperatorCode structure extracted

View File

@@ -1,194 +0,0 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_
#define TENSORFLOW_LITE_CORE_API_PROFILER_H_
#include <cstdint>
namespace tflite {
// A simple utility for enabling profiled event tracing in TensorFlow Lite.
class Profiler {
public:
// As a given Profiler instance might only be interested in certain event
// types, we define each event type value so that a Profiler can use bitwise
// masking to determine whether an event should be recorded or not.
enum class EventType {
// Default event type, the metadata field has no special significance.
DEFAULT = 1,
// The event is an operator invocation and the event_metadata field is the
// index of operator node.
OPERATOR_INVOKE_EVENT = 2,
// The event is an invocation for an internal operator of a TFLite delegate.
// The event_metadata field is the index of operator node that's specific to
// the delegate.
DELEGATE_OPERATOR_INVOKE_EVENT = 4,
// The event is a recording of runtime instrumentation such as the overall
// TFLite runtime status, the TFLite delegate status (if a delegate
// is applied), and the overall model inference latency etc.
// Note, the delegate status and overall status are stored as separate
// event_metadata fields. In particular, the delegate status is encoded
// as DelegateStatus::full_status().
GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8,
};
virtual ~Profiler() {}
// Signals the beginning of an event and returns a handle to the profile
// event. The `event_metadata1` and `event_metadata2` have different
// interpretations based on the actual Profiler instance and the `event_type`.
// For example, as for the 'SubgraphAwareProfiler' defined in
// lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT,
// `event_metadata1` represents the index of a TFLite node, and
// `event_metadata2` represents the index of the subgraph that this event
// comes from.
virtual uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) = 0;
// Similar to the above, but `event_metadata2` defaults to 0.
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata) {
return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0);
}
// Signals an end to the specified profile event with 'event_metadata's. This
// is useful when 'event_metadata's are not available when the event begins
// or when one wants to overwrite the 'event_metadata's set at the beginning.
virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1,
int64_t event_metadata2) {}
// Signals an end to the specified profile event.
virtual void EndEvent(uint32_t event_handle) = 0;
// Appends an event of type 'event_type' with 'tag' and 'event_metadata',
// which started at 'start' and ended at 'end'.
// Note:
// Where ProfileSummarizer and tensorflow::StatsCalculator are used, they
// assume the value is in "usec"; if a subclass does not report usec, the
// values are not meaningful.
// TODO(karimnosseir): Revisit and make the function clearer.
void AddEvent(const char* tag, EventType event_type, uint64_t start,
uint64_t end, int64_t event_metadata) {
AddEvent(tag, event_type, start, end, event_metadata,
/*event_metadata2*/ 0);
}
virtual void AddEvent(const char* tag, EventType event_type, uint64_t start,
uint64_t end, int64_t event_metadata1,
int64_t event_metadata2) {}
protected:
friend class ScopedProfile;
};
// Adds a profile event to `profiler` that begins with the construction
// of the object and ends when the object goes out of scope.
// The lifetime of tag should be at least the lifetime of `profiler`.
// `profiler` may be null, in which case nothing is profiled.
class ScopedProfile {
public:
ScopedProfile(Profiler* profiler, const char* tag,
Profiler::EventType event_type = Profiler::EventType::DEFAULT,
int64_t event_metadata = 0)
: profiler_(profiler), event_handle_(0) {
if (profiler) {
event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata);
}
}
~ScopedProfile() {
if (profiler_) {
profiler_->EndEvent(event_handle_);
}
}
protected:
Profiler* profiler_;
uint32_t event_handle_;
};
class ScopedOperatorProfile : public ScopedProfile {
public:
ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
: ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
static_cast<uint32_t>(node_index)) {}
};
class ScopedDelegateOperatorProfile : public ScopedProfile {
public:
ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag,
int node_index)
: ScopedProfile(profiler, tag,
Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT,
static_cast<uint32_t>(node_index)) {}
};
class ScopedRuntimeInstrumentationProfile : public ScopedProfile {
public:
ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag)
: ScopedProfile(
profiler, tag,
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {}
void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) {
if (profiler_) {
delegate_status_ = delegate_status;
interpreter_status_ = interpreter_status;
}
}
~ScopedRuntimeInstrumentationProfile() {
if (profiler_) {
profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_);
}
}
private:
int64_t delegate_status_;
int64_t interpreter_status_;
};
} // namespace tflite
#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr
#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr)
#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \
tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
(profiler), (tag))
#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
(profiler), (tag), (node_index))
#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \
tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \
_profile_, __COUNTER__)((profiler), (tag), (node_index))
#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \
profiler, tag, delegate_status, interpreter_status) \
do { \
if (!profiler) { \
const auto handle = profiler->BeginEvent( \
tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \
delegate_status, interpreter_status); \
profiler->EndEvent(handle); \
} \
} while (false);
#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_

View File

@@ -178,14 +178,54 @@ inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
// - input x is in the range -(1<<47) <= x < (1<<47)
assert(quantized_multiplier >= 0);
assert(shift >= -31 && shift < 8);
assert(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
int32_t reduced_multiplier = (quantized_multiplier + (1 << 15)) >> 16;
int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
int total_shift = 15 - shift;
x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1));
int32_t result = x >> total_shift;
return result;
}
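// Illustrative compile-time check (not part of this diff): the clamp above
// only changes behavior for multipliers at the very top of the int32 range;
// typical multipliers reduce to the same 16-bit value as before.
static_assert((0x40000000 + (1 << 15)) >> 16 == 0x4000,
              "a mid-range multiplier rounds to the same reduced value");
static_assert(((0x7FFF0000 - 1) + (1 << 15)) >> 16 == 0x7FFF,
              "values just below the clamp threshold already round to 0x7FFF");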
#ifdef USE_NEON
// Round uses ARM's rounding shift right.
inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
const int left_shift = std::max(shift, 0);
const int right_shift = std::min(shift, 0);
int32x4x4_t result;
int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
result.val[0] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup),
multiplier_dup),
right_shift_dup);
result.val[1] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup),
multiplier_dup),
right_shift_dup);
result.val[2] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup),
multiplier_dup),
right_shift_dup);
result.val[3] =
vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup),
multiplier_dup),
right_shift_dup);
return result;
}
#endif
template <typename T>
int CountLeadingZeros(T integer_input) {
static_assert(std::is_unsigned<T>::value,
@@ -239,79 +279,125 @@ inline Integer FloorLog2(Integer n) {
}
}
// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
// softmax
// func - the function to build the LUT for (e.g exp(x))
// min,max - table limits
// table - pointer to buffer
// num - number of elements in the LUT
inline void gen_lut(double (*func)(double), double min, double max,
int16_t* table, const int num) {
// size of table should equal to num + 1
// last element only for slope calculation
double step = (max - min) / (num - 1);
double half_step = step / 2.0;
for (int i = 0; i < num - 1; i++) {
double sample_val = TfLiteRound(func(min + i * step) * 32768.0);
double midpoint_interp_val =
TfLiteRound((func(min + (i + 1) * step) * 32768.0 +
TfLiteRound(func(min + i * step) * 32768.0)) /
2.0);
double midpoint_val =
TfLiteRound(func(min + i * step + half_step) * 32768.0);
double midpoint_err = midpoint_interp_val - midpoint_val;
double bias = TfLiteRound(midpoint_err / 2.0);
table[i] = std::min(std::max(sample_val - bias, -32768.0), 32767.0);
}
table[num - 1] =
std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
// The size of the LUT depends on the type of input. For int8 inputs a simple
// 256-entry LUT is used. For int16 inputs the high 9 bits are used for
// indexing and the 7 remaining bits are used for interpolation. We thus use a
// 513-entry LUT for int16 cases: 512 entries for the 9-bit indexing and 1
// extra entry to interpolate the last value.
template <typename LutInT>
constexpr int lut_size() {
static_assert(std::is_same<LutInT, int8_t>::value ||
std::is_same<LutInT, int16_t>::value,
"Only LUTs with int8 or int16 inputs are supported.");
return std::is_same<LutInT, int8_t>::value ? 256 : 513;
}
// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
// softmax
// func - the function to build the LUT for (e.g exp(x))
// min,max - table limits
// table - pointer to buffer
// num - number of elements in the LUT
inline void gen_lut(float (*func)(float), float min, float max, int16_t* table,
const int num) {
// size of table should equal to num + 1
// last element only for slope calculation
float step = (max - min) / (num - 1);
float half_step = step / 2.0f;
for (int i = 0; i < num - 1; i++) {
float sample_val = TfLiteRound(func(min + i * step) * 32768.0f);
float midpoint_interp_val =
TfLiteRound((func(min + (i + 1) * step) * 32768.0f +
TfLiteRound(func(min + i * step) * 32768.0f)) /
2.0f);
float midpoint_val =
TfLiteRound(func(min + i * step + half_step) * 32768.0f);
float midpoint_err = midpoint_interp_val - midpoint_val;
float bias = TfLiteRound(midpoint_err / 2.0f);
table[i] = std::min(std::max(sample_val - bias, -32768.0f), 32767.0f);
// Generate a LUT for 'func' which can be used to approximate functions like
// exp, log, ...
//
// - func: the function to build the LUT for (e.g exp(x))
// - input_min, input_max: range of the func inputs
// - output_min, output_max: range of the func outputs
// - lut: pointer to the LUT table to fill, the table must be of size
// lut_size<LutInT>()
template <typename FloatT, typename LutInT, typename LutOutT>
inline void gen_lut(FloatT (*func)(FloatT), FloatT input_min, FloatT input_max,
FloatT output_min, FloatT output_max, LutOutT* lut) {
static_assert(std::is_same<LutInT, int8_t>::value ||
std::is_same<LutInT, int16_t>::value,
"Only LUTs with int8 or int16 inputs are supported.");
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
static_assert(std::is_floating_point<FloatT>::value,
"FloatT must be a floating-point type.");
const int nb_steps = std::is_same<LutInT, int8_t>::value ? 256 : 512;
const FloatT step = (input_max - input_min) / nb_steps;
const FloatT half_step = step / 2;
const FloatT output_scaling_inv =
static_cast<FloatT>(std::numeric_limits<LutOutT>::max() -
std::numeric_limits<LutOutT>::min() + 1) /
(output_max - output_min);
const FloatT table_min =
static_cast<FloatT>(std::numeric_limits<LutOutT>::min());
const FloatT table_max =
static_cast<FloatT>(std::numeric_limits<LutOutT>::max());
for (int i = 0; i < nb_steps; i++) {
const FloatT val = func(input_min + i * step);
const FloatT val_midpoint = func(input_min + i * step + half_step);
const FloatT val_next = func(input_min + (i + 1) * step);
const FloatT sample_val = TfLiteRound(val * output_scaling_inv);
const FloatT midpoint_interp_val =
TfLiteRound((val_next * output_scaling_inv +
TfLiteRound(val * output_scaling_inv)) /
2);
const FloatT midpoint_val = TfLiteRound(val_midpoint * output_scaling_inv);
const FloatT midpoint_err = midpoint_interp_val - midpoint_val;
const FloatT bias = TfLiteRound(midpoint_err / 2);
lut[i] = static_cast<LutOutT>(std::min<FloatT>(
std::max<FloatT>(sample_val - bias, table_min), table_max));
}
const bool with_extra_interpolation_value =
std::is_same<LutInT, int16_t>::value;
if (with_extra_interpolation_value) {
lut[nb_steps] = static_cast<LutOutT>(std::min<FloatT>(
std::max<FloatT>(TfLiteRound(func(input_max) * output_scaling_inv),
table_min),
table_max));
}
table[num - 1] = std::min(
std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f);
}
// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
// 512 base value, lut[513] only for calculate slope
uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
// LUT must have 513 values
template <typename LutOutT>
inline LutOutT lut_lookup_with_interpolation(int16_t value,
const LutOutT* lut) {
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
// 512 base values, lut[513] is only used to calculate the slope
const uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
assert(index < 512 && "LUT index out of range.");
int16_t offset = value & 0x7f;
const int16_t offset = value & 0x7f;
// base and slope are Q0.15
int16_t base = lut[index];
int16_t slope = lut[index + 1] - lut[index];
// Base and slope are Q0.x
const LutOutT base = lut[index];
const LutOutT slope = lut[index + 1] - lut[index];
// Q0.15 * Q0.7 = Q0.22
// Round and convert from Q0.22 to Q0.15
int32_t delta = (static_cast<int32_t>(slope) * offset + 64) >> 7;
// Q0.x * Q0.7 = Q0.(x + 7)
// Round and convert from Q0.(x + 7) to Q0.x
const int delta = (slope * offset + 64) >> 7;
// Q0.15 + Q0.15
return base + delta;
return static_cast<LutOutT>(base + delta);
}
// int16_t -> int16_t table lookup with interpolation
// LUT must have 513 values
inline int16_t lut_lookup(int16_t value, const int16_t* lut) {
return lut_lookup_with_interpolation(value, lut);
}
// int16_t -> int8_t table lookup with interpolation
// LUT must have 513 values
inline int8_t lut_lookup(int16_t value, const int8_t* lut) {
return lut_lookup_with_interpolation(value, lut);
}
// int8_t -> int8_t table lookup without interpolation
// LUT must have 256 values
inline int8_t lut_lookup(int8_t value, const int8_t* lut) {
return lut[128 + value];
}
// int8_t -> int16_t table lookup without interpolation
// LUT must have 256 values
inline int16_t lut_lookup(int8_t value, const int16_t* lut) {
return lut[128 + value];
}
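// A minimal usage sketch for the templated LUT helpers above (the wrapper
// names, the input range [-10, 0] and the output range [-1, 1] are
// illustrative assumptions, not values from this diff; assumes <cmath> is
// available): build a 513-entry int16 LUT for exp(x) once, then query it with
// the interpolating int16 lookup.
inline float LutExpSketch(float x) { return std::exp(x); }
inline int16_t LutLookupExpSketch(int16_t value) {
  static int16_t lut[513];
  static bool initialized = false;
  if (!initialized) {
    gen_lut<float, int16_t, int16_t>(LutExpSketch, -10.0f, 0.0f, -1.0f, 1.0f,
                                     lut);
    initialized = true;
  }
  // 'value' is assumed to already be mapped onto the LUT's int16 input range.
  return lut_lookup(value, lut);
}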
// Table of sigmoid(i/24) at 0.16 format - 256 elements.
@@ -533,7 +619,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
// InputIntegerBits - z_b_headroom - 0.25);
const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)),
static_cast<int32_t>(InputIntegerBits - z_a_headroom_plus_1),
31 - kAccumIntegerBits)),
shifted_quarter);
// z_b is treated like z_a, but premultiplying by sqrt(0.5).
@@ -543,7 +630,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)),
static_cast<int32_t>(InputIntegerBits - z_b_headroom),
31 - kAccumIntegerBits)),
shifted_quarter);
const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));

View File

@@ -19,9 +19,8 @@ limitations under the License.
namespace tflite {
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
(defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \
defined(__ZEPHYR__)
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
(defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(__ZEPHYR__)
#define TF_LITE_GLOBAL_STD_PREFIX
#else
#define TF_LITE_GLOBAL_STD_PREFIX std
@@ -34,6 +33,7 @@ namespace tflite {
}
DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);
DECLARE_STD_GLOBAL_SWITCH1(TfLiteExpm1, expm1);
} // namespace tflite

View File

@@ -15,26 +15,6 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#include <arm_neon.h>
#endif
#if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON
#define USE_NEON
#include "NEON_2_SSE.h"
#endif
// NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is
// defined, PortableSomeFunc(args) otherwise.
#ifdef USE_NEON
// Always use Neon code
#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
#else
// No NEON available: Use Portable code
#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
#endif // defined(USE_NEON)
// TFLM does not need to utilize any Neon optimizations.
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_

View File

@@ -15,7 +15,6 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
#include <complex>
#include <vector>
#include "tensorflow/lite/c/common.h"

View File

@@ -289,7 +289,7 @@ void PreprocessSoftmaxScaling(double beta, double input_scale,
input_beta_real_multiplier = (1ll << 31) - 1.0;
}
#else // TFLITE_EMULATE_FLOAT
const double input_beta_real_multiplier = std::min(
const double input_beta_real_multiplier = std::min<double>(
beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0);
#endif // TFLITE_EMULATE_FLOAT

View File

@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
#include <type_traits>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
@@ -27,25 +29,14 @@ inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
T activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
input1_data[i] + input2_data[i], params.quantized_activation_min,
params.quantized_activation_max);
}
}
inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const float* input1_data,
const RuntimeShape& input2_shape, const float* input2_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
auto x = input1_data[i] + input2_data[i];
output_data[i] = ActivationFunctionWithMinMax(
x, params.float_activation_min, params.float_activation_max);
input1_data[i] + input2_data[i], activation_min, activation_max);
}
}
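// A minimal usage sketch of the shared Add() above (shape and values are
// illustrative assumptions, not from this diff): for T == float the activation
// bounds are read from the float fields of ArithmeticParams, and for integer T
// from the quantized fields, which is what lets one template cover both cases.
inline void AddFloatSketch() {
  ArithmeticParams params;
  params.float_activation_min = -6.0f;
  params.float_activation_max = 6.0f;
  const int32_t dims[4] = {1, 1, 1, 2};
  const RuntimeShape shape(4, dims);
  const float a[2] = {1.0f, 10.0f};
  const float b[2] = {2.0f, 20.0f};
  float out[2];
  Add(params, shape, a, shape, b, shape, out);  // out == {3.0f, 6.0f}
}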
@@ -202,21 +193,12 @@ inline void Add(const ArithmeticParams& params,
}
}
// TODO(jiawen): We can implement BroadcastAdd on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
// TODO(benoitjacob): BroadcastAdd is intentionally duplicated from
// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
// reference_ops.h.
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const float* input1_data,
const RuntimeShape& input2_shape,
const float* input2_data,
const RuntimeShape& output_shape,
float* output_data) {
template <typename T>
inline typename std::enable_if<!is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
@@ -224,6 +206,9 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape extended_output_shape =
RuntimeShape::ExtendedShape(4, output_shape);
T activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);
// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
// trailing dimension changing most rapidly (channels has the smallest stride,
@@ -240,51 +225,10 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
output_data[Offset(extended_output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
ActivationFunctionWithMinMax<T>(
input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
input2_data[SubscriptToIndex(desc2, b, y, x, c)],
params.float_activation_min, params.float_activation_max);
}
}
}
}
}
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const int32_t* input1_data,
const RuntimeShape& input2_shape,
const int32_t* input2_data,
const RuntimeShape& output_shape,
int32_t* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
&desc2);
const RuntimeShape extended_output_shape =
RuntimeShape::ExtendedShape(4, output_shape);
// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
// trailing dimension changing most rapidly (channels has the smallest stride,
// typically 1 element).
//
// In generated C code, we store arrays with the dimensions reversed. The
// first dimension has smallest stride.
//
// We name our variables by their Tensorflow convention, but generate C code
// nesting loops such that the innermost loop has the smallest stride for the
// best cache behavior.
for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
output_data[Offset(extended_output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
input2_data[SubscriptToIndex(desc2, b, y, x, c)],
params.quantized_activation_min,
params.quantized_activation_max);
activation_min, activation_max);
}
}
}
@@ -295,10 +239,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
// is 32-bit for both cases. The overflow does not happen due to the
// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
template <typename T>
inline void BroadcastAdd4DSlow(
const ArithmeticParams& params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
inline typename std::enable_if<is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,

View File

@@ -0,0 +1,86 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// T is expected to be either float or int.
template <typename T>
inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs,
const T* const* input_data, T* output_data) {
// All inputs and the output should have the same shape; this is checked
// during the Prepare stage.
const size_t size = input_shape.FlatSize();
for (size_t i = 0; i < size; ++i) {
T x = 0;
for (size_t j = 0; j < num_inputs; ++j) {
x += input_data[j][i];
}
output_data[i] = x;
}
}
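// A minimal usage sketch of the elementwise AddN above (shape and values are
// illustrative assumptions, not from this diff): three float inputs of four
// elements each are summed position-wise.
inline void AddNFloatSketch() {
  const int32_t dims[4] = {1, 1, 1, 4};
  const RuntimeShape shape(4, dims);
  const float in0[4] = {1.f, 2.f, 3.f, 4.f};
  const float in1[4] = {10.f, 20.f, 30.f, 40.f};
  const float in2[4] = {100.f, 200.f, 300.f, 400.f};
  const float* inputs[3] = {in0, in1, in2};
  float out[4];
  AddN<float>(shape, /*num_inputs=*/3, inputs, out);  // out == {111, 222, 333, 444}
}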
inline void AddN(const ArithmeticParams& params,
const RuntimeShape& input_shape, const size_t num_inputs,
const int8_t* const* input_data, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// The input offset is the negated input zero point. Activation tensors are
// asymmetrically quantized, so they span the full int8 range.
// All inputs should have the same zero-point and scale; this is checked
// during the Prepare stage.
TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
// All inputs and the output should have the same shape; this is checked
// during the Prepare stage.
const size_t size = input_shape.FlatSize();
for (size_t i = 0; i < size; ++i) {
// accumulate in scaled_x before clamping to avoid overflow
const int32_t x = params.input1_offset; // x = 0
const int32_t shifted_x = x * (1 << params.left_shift);
int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_x, params.input1_multiplier, params.input1_shift);
for (size_t j = 0; j < num_inputs; ++j) {
const int32_t y = params.input1_offset + input_data[j][i];
const int32_t shifted_y = y * (1 << params.left_shift);
int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_y, params.input1_multiplier, params.input1_shift);
scaled_x += scaled_y;
}
const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
scaled_x, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<int8_t>(clamped_output);
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_

View File

@@ -15,12 +15,23 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
#include <functional>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
std::function<bool(T, T)> GetComparefunction(bool is_arg_max) {
if (is_arg_max) {
return std::greater<T>();
} else {
return std::less<T>();
}
}
template <typename T1, typename T2, typename T3, typename Cmp>
void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
const T3* input2_data, const RuntimeShape& output_shape,
@@ -62,6 +73,15 @@ void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
}
}
}
template <typename T1, typename T2, typename T3>
void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
const T3* input2_data, const RuntimeShape& output_shape,
T2* output_data, const bool is_arg_max) {
ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data,
GetComparefunction<T1>(is_arg_max));
}
} // namespace reference_ops
} // namespace tflite
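A sketch of the convenience overload added by this hunk, assuming the loop body elided between the two hunks implements the usual reduce-over-the-given-axis comparison; the main() harness, include path, and RuntimeShape constructor are assumptions, and the expected result follows from the upstream reference implementation rather than from the lines shown here.

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/arg_min_max.h"

int main() {
  const int32_t in_dims[] = {2, 3};
  const int32_t out_dims[] = {2};
  const tflite::RuntimeShape input_shape(2, in_dims);
  const tflite::RuntimeShape output_shape(1, out_dims);

  const float input[] = {0.1f, 0.9f, 0.5f,   // row 0: max at index 1
                         2.0f, 1.0f, 3.0f};  // row 1: max at index 2
  const int32_t axis[] = {1};                // reduce over the last dimension
  int32_t output[2] = {};

  // is_arg_max == true selects std::greater<float> via GetComparefunction.
  tflite::reference_ops::ArgMinMax(input_shape, input, axis, output_shape,
                                   output, /*is_arg_max=*/true);
  // Expected: output == {1, 2}.
  return 0;
}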


@@ -0,0 +1,275 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#include <algorithm>
#include <cstdint>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_utils_common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
namespace batch_matmul {
// Determine which dimension is the broadcast dimension.
inline int broadcast_dim(int lhs_dim, int rhs_dim) {
if (lhs_dim == rhs_dim) return lhs_dim;
if (lhs_dim == 1) return rhs_dim;
TFLITE_DCHECK_EQ(rhs_dim, 1);
return lhs_dim;
}
// Compute the "extent" for iterating on this dimension.
// If we are broadcasting, then don't advance (i.e return 0).
inline int extent(const RuntimeShape& shape, int x) {
if (shape.Dims(x) == 1) {
return 0;
}
int prod = 1;
for (int i = x + 1; i < shape.DimensionsCount(); ++i) {
prod *= shape.Dims(i);
}
return prod;
}
} // namespace batch_matmul
template <typename Ta, typename Tb, typename Tout>
inline void BatchMatMul(const RuntimeShape& lhs_shape, const Ta* lhs_data,
const RuntimeShape& rhs_shape, const Tb* rhs_data,
const RuntimeShape& output_shape, Tout* output_data) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const Ta* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const Tb* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const Ta* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const Tb* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const Ta* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const Tb* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
Tout* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
for (int i = 0; i < lhs_rows; ++i) {
Tout total = 0;
for (int k = 0; k < accum_depth; ++k) {
total += static_cast<Tout>(lhs_ptr2[accum_depth * i + k]) *
static_cast<Tout>(rhs_ptr2[j * accum_depth + k]);
}
int idx = lhs_rows * j + i;
out_ptr[idx] = total;
}
}
}
}
}
}
inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data,
const RuntimeShape& rhs_shape, const int8_t* rhs_data,
const float* scaling_factors,
const int32_t* input_offset, int32_t* row_sums,
const RuntimeShape& output_shape, float* output_data,
bool* compute_row_sums) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols;
const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols;
const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols;
const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows;
const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows;
const int woff_ext2 = lhs_ext2 == 0 ? 0 : lhs_rows;
if (!compute_row_sums || *compute_row_sums) {
int num_weights_matrices = 1;
for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) {
num_weights_matrices *= extended_lhs_shape.Dims(i);
}
tensor_utils::ReductionSumVector(
lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);
if (compute_row_sums) {
*compute_row_sums = false;
}
}
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0);
const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0);
const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1);
const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1);
const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1);
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2);
const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2);
const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2);
float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
const float batch_scaling_factor = scale_ptr2[j];
const float batch_offset = static_cast<float>(ioff_ptr2[j]);
for (int i = 0; i < lhs_rows; ++i) {
int32_t total = 0;
for (int k = 0; k < accum_depth; ++k) {
total +=
lhs_ptr2[accum_depth * i + k] * rhs_ptr2[j * accum_depth + k];
}
int32_t row_sum = woff_ptr2[i];
total -= row_sum * batch_offset;
int idx = lhs_rows * j + i;
out_ptr[idx] += batch_scaling_factor * total;
}
}
}
}
}
}
template <typename T, typename AccumT>
inline void BatchMatMul(const FullyConnectedParams& params,
const RuntimeShape& lhs_shape, const T* lhs_data,
const RuntimeShape& rhs_shape, const T* rhs_data,
const RuntimeShape& output_shape, T* output_data) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const T* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const T* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const T* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const T* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const T* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
T* out_ptr = output_data +
((b0 * batch_dim1 * batch_dim2) + b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
for (int i = 0; i < lhs_rows; ++i) {
AccumT total = 0;
for (int k = 0; k < accum_depth; ++k) {
AccumT lhs_val = lhs_ptr2[accum_depth * i + k];
AccumT rhs_val = rhs_ptr2[accum_depth * j + k];
total += (lhs_val + filter_offset) * (rhs_val + input_offset);
}
int32_t total_scaled = MultiplyByQuantizedMultiplier(
total, output_multiplier, output_shift);
total_scaled += output_offset;
total_scaled = std::max(total_scaled, output_activation_min);
total_scaled = std::min(total_scaled, output_activation_max);
const int idx = lhs_rows * j + i;
out_ptr[idx] = static_cast<T>(total_scaled);
}
}
}
}
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
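The broadcast_dim/extent helpers carry the batch-broadcasting logic of this file: extent() is the flat stride used to step along a batch dimension, and it collapses to 0 on a size-1 (broadcast) dimension so the same block of data is reused on every iteration. A small sketch, with the main() harness, include path, and RuntimeShape constructor assumed:

#include <cassert>
#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/batch_matmul.h"

int main() {
  using tflite::reference_ops::batch_matmul::broadcast_dim;
  using tflite::reference_ops::batch_matmul::extent;

  // A size-1 batch dimension broadcasts against the other operand.
  assert(broadcast_dim(/*lhs_dim=*/1, /*rhs_dim=*/4) == 4);
  assert(broadcast_dim(/*lhs_dim=*/3, /*rhs_dim=*/3) == 3);

  // extent(): product of the trailing dimensions (the flat stride), or 0
  // when the dimension has size 1 and is therefore broadcast.
  const int32_t dims[] = {2, 1, 3, 4, 5};
  const tflite::RuntimeShape shape(5, dims);
  assert(extent(shape, 0) == 3 * 4 * 5);  // advance by a full batch slice
  assert(extent(shape, 1) == 0);          // broadcast: never advance
  return 0;
}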


@@ -0,0 +1,101 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
#include <cmath>
#include <cstring>  // for memcpy used below
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// TODO(b/135760455): Move this method anonymous namespace in a cc file.
inline RuntimeShape ExtendShapeBatchToSpace(const RuntimeShape& shape) {
if (shape.DimensionsCount() == 4) {
return shape;
}
RuntimeShape new_shape(4, 1);
new_shape.SetDim(0, shape.Dims(0));
new_shape.SetDim(1, shape.Dims(1));
new_shape.SetDim(3, shape.Dims(2));
return new_shape;
}
template <typename T>
inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const int32_t* block_shape_data,
const RuntimeShape& unextended_input3_shape,
const int32_t* crops_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
ruy::profiler::ScopeLabel label("BatchToSpaceND");
TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3);
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(),
unextended_output_shape.DimensionsCount());
const RuntimeShape input1_shape =
ExtendShapeBatchToSpace(unextended_input1_shape);
const RuntimeShape output_shape =
ExtendShapeBatchToSpace(unextended_output_shape);
const int output_width = output_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_batch_size = output_shape.Dims(0);
const int depth = input1_shape.Dims(3);
const int input_width = input1_shape.Dims(2);
const int input_height = input1_shape.Dims(1);
const int input_batch_size = input1_shape.Dims(0);
const int block_shape_height = block_shape_data[0];
const int block_shape_width =
unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1;
const int crops_top = crops_data[0];
const int crops_left =
unextended_input1_shape.DimensionsCount() == 4 ? crops_data[2] : 0;
for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) {
const int out_batch = in_batch % output_batch_size;
const int spatial_offset = in_batch / output_batch_size;
for (int in_h = 0; in_h < input_height; ++in_h) {
const int out_h = in_h * block_shape_height +
spatial_offset / block_shape_width - crops_top;
if (out_h < 0 || out_h >= output_height) {
continue;
}
for (int in_w = 0; in_w < input_width; ++in_w) {
const int out_w = in_w * block_shape_width +
spatial_offset % block_shape_width - crops_left;
if (out_w < 0 || out_w >= output_width) {
continue;
}
T* out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0);
const T* in =
input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);
memcpy(out, in, depth * sizeof(T));
}
}
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
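A sketch of the spatial reshuffle for the simplest case, a 2x2 block with no crops, where four input batches are tiled back into one 2x2 spatial grid. The main() harness, include path, and RuntimeShape constructor are assumptions; the expected output follows from the index math in the kernel above.

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h"

int main() {
  // Input: 4 batches of a single 1x1x1 element; output: 1 batch of 2x2x1.
  const int32_t in_dims[] = {4, 1, 1, 1};
  const int32_t out_dims[] = {1, 2, 2, 1};
  const int32_t block_dims[] = {2};
  const int32_t crops_dims[] = {2, 2};
  const tflite::RuntimeShape input_shape(4, in_dims);
  const tflite::RuntimeShape output_shape(4, out_dims);
  const tflite::RuntimeShape block_shape(1, block_dims);
  const tflite::RuntimeShape crops_shape(2, crops_dims);

  const float input[] = {1.f, 2.f, 3.f, 4.f};
  const int32_t block[] = {2, 2};
  const int32_t crops[] = {0, 0, 0, 0};
  float output[4] = {};

  tflite::reference_ops::BatchToSpaceND(input_shape, input, block_shape, block,
                                        crops_shape, crops, output_shape,
                                        output);
  // Batches are interleaved back into the spatial grid:
  // output (2x2) == {{1, 2}, {3, 4}}.
  return 0;
}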


@@ -23,9 +23,6 @@ namespace tflite {
namespace reference_ops {
// TODO(ycling): Refactoring. Remove BroadcastLogical and use the more
// generalized and efficient BroadcastBinaryFunction.
//
// Also appears to duplicate MinimumMaximum.
//
// R: Result type. T1: Input 1 type. T2: Input 2 type.
@@ -63,7 +60,6 @@ inline void BroadcastBinaryFunction4DSlow(
}
// R: Result type. T1: Input 1 type. T2: Input 2 type.
// TODO(renjieliu): Refactor other binary functions to use this one.
template <typename R, typename T1, typename T2>
inline void BinaryFunction(const RuntimeShape& input1_shape,
const T1* input1_data,


@@ -68,8 +68,7 @@ inline void Concatenation(const ConcatenationParams& params,
}
}
// TODO(prabhumk): This is the same as the optimized implementation.
// TODO(prabhumk): The quantized implementation of concatentation isn't fully
// TODO(b/174275780): The quantized implementation of concatentation isn't fully
// quantized as it takes scale as a floating point value. This should be fixed
// when optimizng this routine further.
inline void ConcatenationWithScaling(const ConcatenationParams& params,


@@ -15,16 +15,13 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& filter_shape,
const float* filter_data, const RuntimeShape& bias_shape,
@@ -108,8 +105,8 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
uint8_t* output_data, const RuntimeShape& im2col_shape,
uint8_t* im2col_data, void* cpu_backend_context) {
(void)cpu_backend_context; // only used in optimized code.
(void)im2col_data; // only used in optimized code.
(void)im2col_shape; // only used in optimized code.
(void)im2col_data; // only used in optimized code.
(void)im2col_shape; // only used in optimized code.
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int dilation_width_factor = params.dilation_width_factor;


@@ -0,0 +1,175 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
#include <algorithm>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void CumSum(const T* input_data, const RuntimeShape& shape, int32_t axis,
bool exclusive, bool reverse, T* output_data) {
const int32_t rank = shape.DimensionsCount();
TFLITE_DCHECK_GE(rank, 1);
TFLITE_DCHECK_GE(axis, 0);
TFLITE_DCHECK_LT(axis, rank);
size_t inner = 1;
size_t outer = 1;
size_t depth = 1;
for (int32_t i = 0; i < rank; i++) {
if (i < axis)
inner *= shape.Dims(i);
else if (i > axis)
outer *= shape.Dims(i);
else
depth = shape.Dims(i);
}
for (size_t outer_index = 0; outer_index < outer; outer_index++) {
size_t outer_index_adj;
if (reverse)
outer_index_adj = (outer - 1) - outer_index;
else
outer_index_adj = outer_index;
for (size_t inner_index = 0; inner_index < inner; inner_index++) {
T accumulator = 0;
size_t inner_index_adj;
if (reverse)
inner_index_adj = (inner - 1) - inner_index;
else
inner_index_adj = inner_index;
for (size_t depth_index = 0; depth_index < depth; depth_index++) {
size_t depth_index_adj;
if (reverse)
depth_index_adj = (depth - 1) - depth_index;
else
depth_index_adj = depth_index;
size_t index = outer_index_adj;
index += inner_index_adj * depth * outer;
index += depth_index_adj * outer;
if (exclusive) {
output_data[index] = accumulator;
accumulator += input_data[index];
} else {
accumulator += input_data[index];
output_data[index] = accumulator;
}
}
}
}
}
//
// Quantized INT8 CUMSUM
//
inline void CumSum(const ArithmeticParams& params, const int8_t* input_data,
const RuntimeShape& shape, int32_t axis, bool exclusive,
bool reverse, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// Input offset is negative input zero point. Activation tensors are
// asymmetric quantized so they span the full int8 range.
// All inputs should have same zero-point and scale, this is checked during
// Prepare stage.
TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
const int32_t rank = shape.DimensionsCount();
TFLITE_DCHECK_GE(rank, 1);
TFLITE_DCHECK_GE(axis, 0);
TFLITE_DCHECK_LT(axis, rank);
size_t inner = 1;
size_t outer = 1;
size_t depth = 1;
for (int32_t i = 0; i < rank; i++) {
if (i < axis)
inner *= shape.Dims(i);
else if (i > axis)
outer *= shape.Dims(i);
else
depth = shape.Dims(i);
}
for (size_t outer_index = 0; outer_index < outer; outer_index++) {
size_t outer_index_adj;
if (reverse)
outer_index_adj = (outer - 1) - outer_index;
else
outer_index_adj = outer_index;
for (size_t inner_index = 0; inner_index < inner; inner_index++) {
int32_t accumulator = params.input1_offset; // accumulator = 0
accumulator *= (1 << params.left_shift);
accumulator = MultiplyByQuantizedMultiplierSmallerThanOneExp(
accumulator, params.input1_multiplier, params.input1_shift);
size_t inner_index_adj;
if (reverse)
inner_index_adj = (inner - 1) - inner_index;
else
inner_index_adj = inner_index;
for (size_t depth_index = 0; depth_index < depth; depth_index++) {
size_t depth_index_adj;
if (reverse)
depth_index_adj = (depth - 1) - depth_index;
else
depth_index_adj = depth_index;
size_t index = outer_index_adj;
index += inner_index_adj * depth * outer;
index += depth_index_adj * outer;
const int32_t y = params.input1_offset + input_data[index];
const int32_t shifted_y = y * (1 << params.left_shift);
const int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_y, params.input1_multiplier, params.input1_shift);
int32_t scaled_output;
if (exclusive) {
scaled_output = accumulator;
accumulator += scaled_y;
} else {
accumulator += scaled_y;
scaled_output = accumulator;
}
const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
scaled_output, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[index] = static_cast<int8_t>(clamped_output);
}
}
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
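A sketch of the float path, which makes the exclusive/reverse flags concrete; the main() harness, include path, and RuntimeShape constructor are assumptions, and the expected values follow from the accumulation loop above.

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/cumsum.h"

int main() {
  const int32_t dims[] = {4};
  const tflite::RuntimeShape shape(1, dims);
  const float in[] = {1.f, 2.f, 3.f, 4.f};
  float out[4] = {};

  // Inclusive, forward: running sum -> {1, 3, 6, 10}.
  tflite::reference_ops::CumSum(in, shape, /*axis=*/0,
                                /*exclusive=*/false, /*reverse=*/false, out);

  // Exclusive, forward: each element excludes itself -> {0, 1, 3, 6}.
  tflite::reference_ops::CumSum(in, shape, 0, /*exclusive=*/true, false, out);

  // Inclusive, reverse: suffix sums -> {10, 9, 7, 4}.
  tflite::reference_ops::CumSum(in, shape, 0, false, /*reverse=*/true, out);
  return 0;
}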


@@ -0,0 +1,79 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
const RuntimeShape& unextended_input_shape,
const T* input_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int input_depth = input_shape.Dims(3);
const int input_width = input_shape.Dims(2);
const int input_height = input_shape.Dims(1);
const int input_batch = input_shape.Dims(0);
const int output_depth = output_shape.Dims(3);
const int output_width = output_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_batch = output_shape.Dims(0);
const int32_t block_size = op_params.block_size;
TFLITE_DCHECK_EQ(input_width * block_size, output_width);
TFLITE_DCHECK_EQ(input_height * block_size, output_height);
TFLITE_DCHECK_EQ(input_depth, output_depth * block_size * block_size);
TFLITE_DCHECK_EQ(input_batch, output_batch);
for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_h = 0; out_h < output_height; ++out_h) {
for (int out_w = 0; out_w < output_width; ++out_w) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
const int in_d =
out_d + ((out_h % block_size) * block_size + out_w % block_size) *
output_depth;
const int in_w = out_w / block_size;
const int in_h = out_h / block_size;
const int in_b = out_b;
const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d);
const int output_index =
Offset(output_shape, out_b, out_h, out_w, out_d);
output_data[output_index] = input_data[input_index];
}
}
}
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
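A sketch for block_size == 2, where a depth-4 pixel is unpacked into a 2x2 spatial patch. The main() harness, include path, RuntimeShape constructor, and the DepthToSpaceParams struct layout (beyond the block_size field used above) are assumptions.

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/depth_to_space.h"

int main() {
  // 1x1x1x4 input -> 1x2x2x1 output with block_size == 2.
  const int32_t in_dims[] = {1, 1, 1, 4};
  const int32_t out_dims[] = {1, 2, 2, 1};
  const tflite::RuntimeShape input_shape(4, in_dims);
  const tflite::RuntimeShape output_shape(4, out_dims);

  tflite::DepthToSpaceParams params;
  params.block_size = 2;

  const float input[] = {1.f, 2.f, 3.f, 4.f};  // depth channels 0..3
  float output[4] = {};

  tflite::reference_ops::DepthToSpace(params, input_shape, input, output_shape,
                                      output);
  // Channel c of the input lands at spatial position (c / 2, c % 2):
  // output (2x2) == {{1, 2}, {3, 4}}.
  return 0;
}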


@@ -0,0 +1,37 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void Elu(const RuntimeShape& input_shape, const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
const float val = input_data[i];
output_data[i] = val < 0.0f ? TfLiteExpm1(val) : val;
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
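Elu maps negative inputs to exp(x) - 1 (via TfLiteExpm1) and passes non-negative inputs through unchanged. A small sketch, with the main() harness, include path, and RuntimeShape constructor assumed:

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/elu.h"

int main() {
  const int32_t dims[] = {3};
  const tflite::RuntimeShape shape(1, dims);

  const float input[] = {-1.f, 0.f, 2.f};
  float output[3] = {};

  tflite::reference_ops::Elu(shape, input, shape, output);
  // output == {expm1(-1) which is roughly -0.632, 0, 2}.
  return 0;
}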


@@ -0,0 +1,38 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
#include <cmath>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void Exp(const T* input_data, const size_t num_elements,
T* output_data) {
ruy::profiler::ScopeLabel label("Exp");
for (size_t idx = 0; idx < num_elements; ++idx) {
output_data[idx] = std::exp(input_data[idx]);
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_


@@ -0,0 +1,38 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
void Fill(const RuntimeShape& value_shape, const T* value_data,
const RuntimeShape& output_shape, T* output_data) {
TFLITE_DCHECK_EQ(value_shape.DimensionsCount(), 0);
const int flat_size = output_shape.FlatSize();
for (int i = 0; i < flat_size; ++i) {
output_data[i] = *value_data;
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
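Fill expects the value to be a rank-0 (scalar) tensor, which is what the DCHECK above enforces. A small sketch, assuming the default RuntimeShape constructor yields a 0-D shape, and with the main() harness and include path as further assumptions:

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/fill.h"

int main() {
  // The fill value is a scalar (0-D) tensor.
  const tflite::RuntimeShape value_shape;
  const int32_t out_dims[] = {3};
  const tflite::RuntimeShape output_shape(1, out_dims);

  const float value = 7.f;
  float output[3] = {};

  // Broadcasts the single value across the whole output: {7, 7, 7}.
  tflite::reference_ops::Fill(value_shape, &value, output_shape, output);
  return 0;
}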


@@ -0,0 +1,35 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#include <cmath>
#include <functional>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename T>
T FloorDiv(T input1, T input2) {
return std::floor(std::divides<double>()(static_cast<double>(input1),
static_cast<double>(input2)));
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
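FloorDiv rounds toward negative infinity (via std::floor on a double quotient) rather than toward zero as C++ integer division does. A quick sketch, with the main() harness and include path assumed:

#include <cassert>
#include "tensorflow/lite/kernels/internal/reference/floor_div.h"

int main() {
  using tflite::reference_ops::FloorDiv;

  assert(FloorDiv(7, 2) == 3);
  assert(FloorDiv(-7, 2) == -4);   // floor(-3.5) == -4, not -3 as with plain /
  assert(FloorDiv(7.5f, 2.0f) == 3.0f);
  return 0;
}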


@@ -0,0 +1,44 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#include <cmath>
#include <functional>
#include <type_traits>  // for std::conditional and std::is_integral used below
namespace tflite {
namespace reference_ops {
template <typename T>
T FloorMod(T input1, T input2) {
struct FloatMod {
float operator()(const float lhs, const float rhs) const {
return std::fmod(lhs, rhs);
}
};
using ModFunc = typename std::conditional<std::is_integral<T>::value,
std::modulus<T>, FloatMod>::type;
ModFunc mod_func;
T trunc_mod = mod_func(input1, input2);
return (trunc_mod != 0) && ((input2 < 0) != (trunc_mod < 0))
? (trunc_mod + input2)
: trunc_mod;
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
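Unlike C++'s truncating % operator, FloorMod returns a remainder that carries the sign of the divisor (the floor-mod convention). A quick sketch, with the main() harness and include path assumed:

#include <cassert>
#include "tensorflow/lite/kernels/internal/reference/floor_mod.h"

int main() {
  using tflite::reference_ops::FloorMod;

  assert(FloorMod(7, 3) == 1);
  assert(FloorMod(-7, 3) == 2);    // C++ gives -7 % 3 == -1; floor-mod adds the divisor
  assert(FloorMod(7, -3) == -2);
  assert(FloorMod(7.5f, 2.0f) == 1.5f);
  return 0;
}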

Some files were not shown because too many files have changed in this diff.