Fix alignment mark PSRAM issue (#2357)

* Add a lock for the shared PSRAM memory and use it for the relevant round steps and the Alignment Mark Task (a minimal sketch of the locking idea follows below this list)

* only allow taking a new image for the Alignment Mark once the round has completed

* show success/denial of the Alignment Mark Update Request

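As an illustration of the locking approach described above (not part of the commit): the shared PSRAM region is guarded by a single owner tag, and a step may only claim the region while the tag is empty — a cooperative flag rather than a real mutex. The helper names claim_shared_memory/release_shared_memory below are hypothetical; the actual functions are shown in the diff further down.

#include <iostream>
#include <string>

// Single owner tag guarding the shared PSRAM block.
static std::string sharedMemoryInUseFor = "";

static bool claim_shared_memory(const std::string &user) {
    if (!sharedMemoryInUseFor.empty()) {
        std::cout << "Shared memory already in use for " << sharedMemoryInUseFor << "!" << std::endl;
        return false;                      // caller has to back off or retry later
    }
    sharedMemoryInUseFor = user;
    return true;
}

static void release_shared_memory(void) {
    sharedMemoryInUseFor = "";
}

int main() {
    claim_shared_memory("TakeImage");      // granted, tag was empty
    claim_shared_memory("Aligning");       // denied, TakeImage still owns the region
    release_shared_memory();
    claim_shared_memory("Aligning");       // granted now
    return 0;
}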

---------

Co-authored-by: CaCO3 <caco@ruinelli.ch>
Authored by CaCO3 on 2023-04-27 22:30:12 +02:00, committed by GitHub
parent 6defcf8d4c
commit 9b1a83c8b4
7 changed files with 109 additions and 34 deletions


@@ -9,6 +9,7 @@ using namespace std;
void *shared_region = NULL;
uint32_t allocatedBytesForSTBI = 0;
std::string sharedMemoryInUseFor = "";
/** Reserve a large block in the PSRAM which will be shared between the different steps.
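The body of reserve_psram_shared_region() is not part of this hunk. As a rough sketch only (placeholder sizes and error handling, not the repo's actual implementation), a one-time reservation of such a shared block with the ESP-IDF heap API could look like this:

#include <cstddef>
#include "esp_heap_caps.h"

// Placeholder sizes for illustration; the real values come from the project's headers.
#define TENSOR_ARENA_SIZE  (800 * 1024)
#define MAX_MODEL_SIZE     (300 * 1024)

static void *shared_region = NULL;

bool reserve_psram_shared_region(void) {
    if (shared_region != NULL) {
        return true;        // block was already reserved once, keep reusing it
    }
    // One large allocation in external PSRAM, shared by all round steps.
    shared_region = heap_caps_malloc(TENSOR_ARENA_SIZE + MAX_MODEL_SIZE, MALLOC_CAP_SPIRAM);
    return (shared_region != NULL);
}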
@@ -33,9 +34,24 @@ bool reserve_psram_shared_region(void) {
/*******************************************************************
* Memory used in Take Image (STBI)
*******************************************************************/
void psram_init_shared_memory_for_take_image_step(void) {
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Init shared memory for step 'Take Image' (STBI buffers)");
bool psram_init_shared_memory_for_take_image_step(void) {
if (sharedMemoryInUseFor != "") {
LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Shared memory in PSRAM already in use for " + sharedMemoryInUseFor + "!");
return false;
}
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Init shared memory for step 'Take Image' (STBI buffers)");
allocatedBytesForSTBI = 0;
sharedMemoryInUseFor = "TakeImage";
return true;
}
void psram_deinit_shared_memory_for_take_image_step(void) {
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Deinit shared memory for step 'Take Image' (STBI buffers)");
allocatedBytesForSTBI = 0;
sharedMemoryInUseFor = "";
}
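Hypothetical caller sketch (not from the repo) showing what the new bool return enables: the Take Image step only proceeds when it could claim the shared region, and always releases the owner tag again afterwards.

// Illustrative flow; doTakeImageStep() is an assumed name, not a function of the project.
bool doTakeImageStep(void) {
    if (!psram_init_shared_memory_for_take_image_step()) {
        // Shared PSRAM is still owned by another task (e.g. an Alignment Mark update) -> skip this run.
        return false;
    }
    // ... take and decode the camera frame; the STBI buffers now come out of the shared region ...
    psram_deinit_shared_memory_for_take_image_step();    // always release the owner tag again
    return true;
}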
@@ -45,7 +61,7 @@ void *psram_reserve_shared_stbi_memory(size_t size) {
if (size >= 100000) {
if ((allocatedBytesForSTBI + size) > TENSOR_ARENA_SIZE + MAX_MODEL_SIZE) { // Check if it still fits in the shared region
LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Shared memory in PSRAM too small (STBI) to fit additional " +
std::to_string(size) + "bytes! Available: " + std::to_string(TENSOR_ARENA_SIZE + MAX_MODEL_SIZE - allocatedBytesForSTBI) + " bytes!");
std::to_string(size) + " bytes! Available: " + std::to_string(TENSOR_ARENA_SIZE + MAX_MODEL_SIZE - allocatedBytesForSTBI) + " bytes!");
return NULL;
}
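For illustration only (a fragment meant to sit in the same file, with an assumed buffer size): a large STBI-sized request is served from the shared block, and the caller has to handle a NULL return when the remaining space is too small.

size_t requested = 640 * 480 * 3;          // e.g. one RGB frame buffer, well above the 100000 byte threshold
uint8_t *buf = (uint8_t *) psram_reserve_shared_stbi_memory(requested);
if (buf == NULL) {
    // The remaining shared space cannot hold this request -> abort the decode step.
}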
@@ -83,14 +99,21 @@ void psram_free_shared_stbi_memory(void *p) {
* During this step we only use the shared part of the PSRAM
* for the tmpImage.
*******************************************************************/
void *psram_reserve_shared_tmp_image_memory(void) {
if (sharedMemoryInUseFor != "") {
LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Shared memory in PSRAM already in use for " + sharedMemoryInUseFor + "!");
return NULL;
}
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Allocating tmpImage (" + std::to_string(IMAGE_SIZE) + " bytes, use shared memory in PSRAM)...");
sharedMemoryInUseFor = "Aligning";
return shared_region; // Use 1st part of the shared memory for the tmpImage (only user)
}
void psram_free_shared_temp_image_memory(void) {
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Shared memory used for tmpImage (PSRAM, part of shared memory) is free again");
sharedMemoryInUseFor = "";
}
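Hypothetical sketch of the Aligning step around the two helpers above: the tmpImage buffer is only handed out while no other step owns the region, so a NULL return has to be treated as "try again later". The surrounding function name is an assumption.

// Illustrative caller (doAlignAndCutStep is an assumed name, not project code):
bool doAlignAndCutStep(void) {
    uint8_t *tmpImage = (uint8_t *) psram_reserve_shared_tmp_image_memory();
    if (tmpImage == NULL) {
        // Another step (e.g. TakeImage or Digitalization) still owns the shared region -> try again later.
        return false;
    }
    // ... rotate/align the camera image using tmpImage as scratch buffer ...
    psram_free_shared_temp_image_memory();    // clears the owner tag again
    return true;
}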
@@ -103,18 +126,33 @@ void psram_free_shared_temp_image_memory(void) {
* Tensor Arena. Therefore we do not need to monitor the usage.
*******************************************************************/
void *psram_get_shared_tensor_arena_memory(void) {
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Allocating Tensor Arena (" + std::to_string(TENSOR_ARENA_SIZE) + " bytes, use shared memory in PSRAM)...");
return shared_region; // Use 1st part of the shared memory for Tensor
if ((sharedMemoryInUseFor == "") || (sharedMemoryInUseFor == "Digitalization_Model")) {
sharedMemoryInUseFor = "Digitalization_Tensor";
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Allocating Tensor Arena (" + std::to_string(TENSOR_ARENA_SIZE) + " bytes, use shared memory in PSRAM)...");
return shared_region; // Use 1st part of the shared memory for Tensor
}
else {
LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Shared memory in PSRAM already in use for " + sharedMemoryInUseFor + "!");
return NULL;
}
}
void *psram_get_shared_model_memory(void) {
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Allocating Model memory (" + std::to_string(MAX_MODEL_SIZE) + " bytes, use shared memory in PSRAM)...");
return (uint8_t *)shared_region + TENSOR_ARENA_SIZE; // Use 2nd part of the shared memory (after Tensor Arena) for the model
if ((sharedMemoryInUseFor == "") || (sharedMemoryInUseFor == "Digitalization_Tensor")) {
sharedMemoryInUseFor = "Digitalization_Model";
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Allocating Model memory (" + std::to_string(MAX_MODEL_SIZE) + " bytes, use shared memory in PSRAM)...");
return (uint8_t *)shared_region + TENSOR_ARENA_SIZE; // Use 2nd part of the shared memory (after Tensor Arena) for the model
}
else {
LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Shared memory in PSRAM already in use for " + sharedMemoryInUseFor + "!");
return NULL;
}
}
void psram_free_shared_tensor_arena_and_model_memory(void) {
sharedMemoryInUseFor = "";
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Shared memory used for Tensor Arena and model (PSRAM, part of shared memory) is free again");
}
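Hypothetical sketch of the Digitalization step (illustrative names, not project code): tensor arena and model memory may be requested in either order, because each getter also accepts the tag set by the other one, and both are released together at the end.

bool doDigitalizationStep(void) {
    uint8_t *tensorArena = (uint8_t *) psram_get_shared_tensor_arena_memory();
    uint8_t *modelBuffer = (uint8_t *) psram_get_shared_model_memory();
    if ((tensorArena == NULL) || (modelBuffer == NULL)) {
        // Shared region is owned by another step; nothing was claimed, so nothing to free here.
        return false;
    }
    // ... load the tflite model into modelBuffer and run inference with tensorArena ...
    psram_free_shared_tensor_arena_and_model_memory();    // clears the owner tag for the next step
    return true;
}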
@@ -156,7 +194,7 @@ void *calloc_psram_heap(std::string name, size_t n, size_t size, uint32_t caps)
ptr = heap_caps_calloc(n, size, caps);
if (ptr != NULL) {
LogFile.WriteToFile(ESP_LOG_INFO, TAG, "Allocated " + to_string(size) + " bytes in PSRAM for '" + name + "'");
LogFile.WriteToFile(ESP_LOG_DEBUG, TAG, "Allocated " + to_string(size) + " bytes in PSRAM for '" + name + "'");
}
else {
LogFile.WriteToFile(ESP_LOG_ERROR, TAG, "Failed to allocate " + to_string(size) + " bytes in PSRAM for '" + name + "'!");
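Usage sketch for calloc_psram_heap() above (buffer name and size are assumptions): callers get a named, logged PSRAM allocation and only have to check the returned pointer.

uint8_t *rawBuffer = (uint8_t *) calloc_psram_heap("rawImageBuffer", 1, 640 * 480 * 2, MALLOC_CAP_SPIRAM);
if (rawBuffer == NULL) {
    // Allocation failed; the wrapper has already written the error to the log.
}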