mirror of https://github.com/sle118/squeezelite-esp32.git, synced 2025-12-06 11:36:59 +03:00

Merge branch 'master-v4.3' into led_visu-v4.3
.github/workflows/Platform_build.yml (vendored): 13 changes
@@ -18,7 +18,7 @@ jobs:
name: Global setup
runs-on: ubuntu-latest
container:
image: sle118/squeezelite-esp32-idfv43
image: sle118/squeezelite-esp32-idfv435
outputs:
build_number: ${{ steps.buildnumber.outputs.build_number }}
ui_build: ${{ steps.build_flags.outputs.ui_build }}
@@ -99,7 +99,7 @@ jobs:
build:
runs-on: ubuntu-latest
container:
image: sle118/squeezelite-esp32-idfv43
image: sle118/squeezelite-esp32-idfv435
needs: [bootstrap]
strategy:
matrix:
@@ -127,7 +127,7 @@ jobs:
. /opt/esp/python_env/idf4.3_py3.8_env/bin/activate
git config --global --add safe.directory /__w/squeezelite-esp32/squeezelite-esp32
git status
build_tools.py environment --build ${{ needs.bootstrap.outputs.build_number }} --env_file "$GITHUB_ENV" --node "${{matrix.node}}" --depth ${{matrix.depth}} --major 2 --docker sle118/squeezelite-esp32-idfv43
build_tools.py environment --build ${{ needs.bootstrap.outputs.build_number }} --env_file "$GITHUB_ENV" --node "${{matrix.node}}" --depth ${{matrix.depth}} --major 2 --docker sle118/squeezelite-esp32-idfv435

- uses: actions/download-artifact@master
name: Restore common objects
@@ -137,11 +137,6 @@ jobs:
if: ${{ needs.bootstrap.outputs.mock == 0 }}
run: |
. ${IDF_PYTHON_ENV_PATH}/bin/activate
chmod +x ./components/spotify/cspot/bell/external/nanopb/generator/protoc
chmod +x ./components/spotify/cspot/bell/external/nanopb/generator/protoc-gen-nanopb
chmod +x ./components/spotify/cspot/bell/external/nanopb/generator/*.py
chmod +x ./components/spotify/cspot/bell/external/nanopb/generator/*.py2
chmod +x ./components/spotify/cspot/bell/external/nanopb/generator/proto/*.py
echo "Copying target sdkconfig"
cp build-scripts/${TARGET_BUILD_NAME}-sdkconfig.defaults sdkconfig
echo "Building project"
@@ -230,4 +225,4 @@ jobs:
if: ${{ always() && !cancelled() && needs.bootstrap.outputs.release_flag == 1 && needs.bootstrap.outputs.mock == 0 }}
uses: ./.github/workflows/web_deploy.yml
secrets:
WEB_INSTALLER: ${{ secrets.WEB_INSTALLER }}
WEB_INSTALLER: ${{ secrets.WEB_INSTALLER }}

.github/workflows/codeql-analysis.yml (vendored): 16 changes
@@ -19,6 +19,7 @@ on:
branches: [ master-cmake ]
schedule:
- cron: '19 12 * * 4'
workflow_dispatch:

jobs:
analyze:
@@ -39,7 +40,7 @@ jobs:

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -50,7 +51,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
uses: github/codeql-action/autobuild@v2

# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -62,6 +63,13 @@ jobs:
#- run: |
# make bootstrap
# make release

# Exclude specific artifacts from analysis
- name: Exclude Artifacts
run: |
# Exclude components/wifi-manager/webapp/dist/js/index* from analysis
echo 'components/wifi-manager/webapp/dist/js/index*' >> .codeql-exclude-paths
echo 'components/wifi-manager/webapp/dist/js/index*' >> .codeql-exclude-paths.txt
echo 'components/wifi-manager/webapp/dist/index*' >> .codeql-exclude-paths
echo 'components/wifi-manager/webapp/dist/index*' >> .codeql-exclude-paths.txt
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
uses: github/codeql-action/analyze@v2

CMakeLists.txt: 120 changes
@@ -1,8 +1,13 @@
cmake_minimum_required(VERSION 3.5)
set(EXTRA_COMPONENT_DIRS components/platform_console/app_recovery components/platform_console/app_squeezelite )
include($ENV{IDF_PATH}/tools/cmake/project.cmake)

set(PROJECT_VER $ENV{PROJECT_VER})
add_definitions(-DMODEL_NAME=SqueezeESP32)

if(NOT DEFINED DEPTH)
set(DEPTH "16")
endif()

# State machine hierarchy enabled and logging enabled
add_definitions(-DSTATE_MACHINE_LOGGER=1)
add_definitions(-DHIERARCHICAL_STATES=1)
@@ -18,14 +23,113 @@ add_definitions(-DHIERARCHICAL_STATES=1)
#add_definitions(-DNETWORK_MANAGER_LOG_LEVEL=ESP_LOG_DEBUG)
#add_definitions(-DNETWORK_HTTP_SERVER_LOG_LEVEL=ESP_LOG_DEBUG)

if(NOT DEFINED DEPTH)
set(DEPTH "16")
endif()
message(STATUS "Building RECOVERY")
# utility to build sizes
function(build_size target_name)
set(target_elf ${target_name}.elf)
set(target_map ${target_name}.map)
set(idf_size ${python} ${IDF_PATH}/tools/idf_size.py)

if(DEFINED OUTPUT_JSON AND OUTPUT_JSON)
list(APPEND idf_size "--json")
endif()

add_custom_target(size-${target_name} ALL
DEPENDS ${target_elf}
COMMAND ${idf_size} ${target_map} -o "size-${target_name}"
)

add_custom_target(size-files-${target_name} ALL
DEPENDS ${target_elf}
COMMAND ${idf_size} --files ${target_map}
)

add_custom_target(size-components-${target_name} ALL
DEPENDS ${target_elf}
COMMAND ${idf_size} --archives ${target_map}
)
endfunction()

# manually add the 2 versions for application: recovery and squeezelite
set(EXTRA_COMPONENT_DIRS components/platform_console/app_recovery components/platform_console/app_squeezelite )

project(recovery)
set_property(TARGET recovery.elf PROPERTY RECOVERY_PREFIX app_recovery )
include(squeezelite.cmake)
set(PROJECT_VER $ENV{PROJECT_VER})

# we need own "esp_app_desc" to take precedence
add_custom_command(
TARGET recovery.elf
PRE_LINK
COMMAND xtensa-esp32-elf-objcopy --weaken-symbol esp_app_desc ${BUILD_DIR}/esp-idf/app_update/libapp_update.a
VERBATIM
)

# when building recovery, add app_recovery to the link
get_target_property(BCA recovery.elf LINK_LIBRARIES)
list(REMOVE_ITEM BCA "idf::app_squeezelite" "idf::app_recovery" "-Wl,--Map=${BUILD_DIR}/recovery.map")
set_target_properties(recovery.elf PROPERTIES LINK_LIBRARIES "${BCA};idf::app_recovery;-Wl,--Map=${BUILD_DIR}/recovery.map")

# create files with size for recovery
# build_size(recovery)


# build squeezelite, add app_squeezelite to the link
add_executable(squeezelite.elf "CMakeLists.txt")
add_dependencies(squeezelite.elf recovery.elf)
set_target_properties(squeezelite.elf PROPERTIES LINK_LIBRARIES "${BCA};idf::app_squeezelite;-Wl,--Map=${BUILD_DIR}/squeezelite.map")
add_custom_command(
TARGET squeezelite.elf
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E echo "Generating ${BUILD_DIR}/squeezelite.bin"
COMMAND ${ESPTOOLPY} elf2image ${ESPTOOLPY_FLASH_OPTIONS} ${ESPTOOLPY_ELF2IMAGE_OPTIONS} -o "squeezelite.bin" "squeezelite.elf"
VERBATIM
)

# create files with size for squeezelite
# build_size(squeezelite)

# make it part of cleanup
set_property(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
APPEND PROPERTY
ADDITIONAL_MAKE_CLEAN_FILES "${BUILD_DIR}/squeezelite.elf" "${BUILD_DIR}/squeezelite.map"
)

# adding OTA_0 partition
partition_table_get_partition_info(otaapp_offset "--partition-type app --partition-subtype ota_0" "offset")
idf_component_get_property(main_args esptool_py FLASH_ARGS)
idf_component_get_property(sub_args esptool_py FLASH_SUB_ARGS)
esptool_py_flash_target(squeezelite-flash "${main_args}" "${sub_args}")
esptool_py_flash_target_image(squeezelite-flash squeezelite "${otaapp_offset}" "${BUILD_DIR}/squeezelite.bin")
esptool_py_flash_target_image(flash squeezelite "${otaapp_offset}" "${BUILD_DIR}/squeezelite.bin")

# and JTAG scripts
add_custom_target(_jtag_scripts ALL
BYPRODUCTS "flash_dbg_project_args"
POST_BUILD
COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_SOURCE_DIR}/generate_debug_scripts.cmake"
)

if(CMAKE_HOST_UNIX)
# Add custom target to set executable permissions before build for cspot component
add_custom_target(set_cspot_permissions
COMMAND ${CMAKE_COMMAND} -E echo "************************************************************************************************"
COMMAND ${CMAKE_COMMAND} -E echo "**** Setting permissions for required files"
COMMAND ${CMAKE_COMMAND} -E echo "**** ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/protoc-gen-nanopb"
COMMAND ${CMAKE_COMMAND} -E echo "**** ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/*.py"
COMMAND ${CMAKE_COMMAND} -E echo "**** ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/*.py2"
COMMAND ${CMAKE_COMMAND} -E echo "**** ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/proto/*.py"
COMMAND ${CMAKE_COMMAND} -E echo "**** ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/protoc"
COMMAND chmod +x ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/protoc-gen-nanopb
COMMAND chmod +x ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/*.py
COMMAND chmod +x ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/*.py2
COMMAND chmod +x ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/proto/*.py
COMMAND chmod +x ${CMAKE_SOURCE_DIR}/components/spotify/cspot/bell/external/nanopb/generator/protoc
COMMAND ${CMAKE_COMMAND} -E echo "************************************************************************************************"
)

# Add a dependency to ensure permissions are set before building cspot component
add_dependencies(__idf_spotify set_cspot_permissions)
endif()

# ======================= DEBUG FLAGS ============================

#target_compile_definitions(__idf_esp_eth PRIVATE -DLOG_LOCAL_LEVEL=ESP_LOG_INFO)

Dockerfile: 74 changes
@@ -2,29 +2,34 @@ FROM ubuntu:20.04


ARG DEBIAN_FRONTEND=noninteractive
ENV GCC_TOOLS_BASE=/opt/esp/tools/xtensa-esp32-elf/esp-2021r2-8.4.0/xtensa-esp32-elf/bin/xtensa-esp32-elf-
ENV GCC_TOOLS_BASE=/opt/esp/tools/xtensa-esp32-elf/esp-2021r2-patch3-8.4.0/xtensa-esp32-elf/bin/xtensa-esp32-elf-
# To build the image for a branch or a tag of IDF, pass --build-arg IDF_CLONE_BRANCH_OR_TAG=name.
# To build the image with a specific commit ID of IDF, pass --build-arg IDF_CHECKOUT_REF=commit-id.
# It is possible to combine both, e.g.:
# IDF_CLONE_BRANCH_OR_TAG=release/vX.Y
# IDF_CHECKOUT_REF=<some commit on release/vX.Y branch>.
# The following commit contains the ldgen fix: eab738c79e063b3d6f4c345ea5e1d4f8caef725b
# to build an image using that commit: docker build . --build-arg IDF_CHECKOUT_REF=eab738c79e063b3d6f4c345ea5e1d4f8caef725b -t sle118/squeezelite-esp32-idfv43
# Docker build for release 4.3.2 as of 2022/02/28
# docker build . --build-arg IDF_CHECKOUT_REF=8bf14a9238329954c7c5062eeeda569529aedf75 -t sle118/squeezelite-esp32-idfv43
# To run the image interactive (windows):
# docker run --rm -v %cd%:/project -w /project -it sle118/squeezelite-esp32-idfv43
# To run the image interactive (linux):
# docker run --rm -v `pwd`:/project -w /project -it sle118/squeezelite-esp32-idfv43
# Docker build for release 4.3.5 as of 2023/05/18
# docker build . --build-arg IDF_CHECKOUT_REF=6d04316cbe4dc35ea7e4885e9821bd9958ac996d -t sle118/squeezelite-esp32-idfv435
# Updating the docker image in the repository
# docker push sle118/squeezelite-esp32-idfv435
# or to do both:
# docker build . --build-arg IDF_CHECKOUT_REF=6d04316cbe4dc35ea7e4885e9821bd9958ac996d -t sle118/squeezelite-esp32-idfv435 && docker push sle118/squeezelite-esp32-idfv435
#
# (windows) To run the image interactive :
# docker run --rm -v %cd%:/project -w /project -it sle118/squeezelite-esp32-idfv435
# (windows powershell)
# docker run --rm -v ${PWD}:/project -w /project -it sle118/squeezelite-esp32-idfv435
# (linux) To run the image interactive :
# docker run --rm -v `pwd`:/project -w /project -it sle118/squeezelite-esp32-idfv435
# to build the web app inside of the interactive session
# pushd components/wifi-manager/webapp/ && npm install && npm run-script build && popd
#
# to run the docker with network port published on the host:
# docker run --rm -p 5000:5000/tcp -v %cd%:/project -w /project -it sle118/squeezelite-esp32-idfv43
# docker run --rm -p 5000:5000/tcp -v %cd%:/project -w /project -it sle118/squeezelite-esp32-idfv435

ARG IDF_CLONE_URL=https://github.com/espressif/esp-idf.git
ARG IDF_CLONE_BRANCH_OR_TAG=master
ARG IDF_CHECKOUT_REF=8bf14a9238329954c7c5062eeeda569529aedf75
ARG IDF_CHECKOUT_REF=6d04316cbe4dc35ea7e4885e9821bd9958ac996d

ENV IDF_PATH=/opt/esp/idf
ENV IDF_TOOLS_PATH=/opt/esp
@@ -35,6 +40,7 @@ RUN : \
&& apt-get update \
&& apt-get install -y \
apt-utils \
build-essential \
bison \
ca-certificates \
ccache \
@@ -42,22 +48,26 @@ RUN : \
curl \
flex \
git \
git-lfs \
gperf \
lcov \
libbsd-dev \
libpython3.8 \
libffi-dev \
libncurses-dev \
libpython2.7 \
libusb-1.0-0-dev \
make \
ninja-build \
python3 \
python3.8 \
python3-pip \
python3-venv \
ruby \
unzip \
wget \
xz-utils \
zip \
npm \
nodejs \
npm \
nodejs \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/* \
&& update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \
@@ -97,8 +107,8 @@ RUN : \
COPY docker/patches $IDF_PATH

#set idf environment variables
ENV PATH /opt/esp/idf/components/esptool_py/esptool:/opt/esp/idf/components/espcoredump:/opt/esp/idf/components/partition_table:/opt/esp/idf/components/app_update:/opt/esp/tools/xtensa-esp32-elf/esp-2021r2-8.4.0/xtensa-esp32-elf/bin:/opt/esp/tools/xtensa-esp32s2-elf/esp-2021r2-8.4.0/xtensa-esp32s2-elf/bin:/opt/esp/tools/xtensa-esp32s3-elf/esp-2021r2-8.4.0/xtensa-esp32s3-elf/bin:/opt/esp/tools/riscv32-esp-elf/esp-2021r2-8.4.0/riscv32-esp-elf/bin:/opt/esp/tools/esp32ulp-elf/2.28.51-esp-20191205/esp32ulp-elf-binutils/bin:/opt/esp/tools/esp32s2ulp-elf/2.28.51-esp-20191205/esp32s2ulp-elf-binutils/bin:/opt/esp/tools/cmake/3.16.4/bin:/opt/esp/tools/openocd-esp32/v0.10.0-esp32-20211111/openocd-esp32/bin:/opt/esp/python_env/idf4.3_py3.8_env/bin:/opt/esp/idf/tools:$PATH
ENV GCC_TOOLS_BASE="/opt/esp/tools/xtensa-esp32-elf/esp-2021r2-8.4.0/xtensa-esp32-elf/bin/xtensa-esp32-elf-"
ENV PATH /opt/esp/idf/components/esptool_py/esptool:/opt/esp/idf/components/espcoredump:/opt/esp/idf/components/partition_table:/opt/esp/idf/components/app_update:/opt/esp/tools/xtensa-esp32-elf/esp-2021r2-patch3-8.4.0/xtensa-esp32-elf/bin:/opt/esp/tools/xtensa-esp32s2-elf/esp-2021r2-patch3-8.4.0/xtensa-esp32s2-elf/bin:/opt/esp/tools/xtensa-esp32s3-elf/esp-2021r2-patch3-8.4.0/xtensa-esp32s3-elf/bin:/opt/esp/tools/riscv32-esp-elf/esp-2021r2-patch3-8.4.0/riscv32-esp-elf/bin:/opt/esp/tools/esp32ulp-elf/2.28.51-esp-20191205/esp32ulp-elf-binutils/bin:/opt/esp/tools/esp32s2ulp-elf/2.28.51-esp-20191205/esp32s2ulp-elf-binutils/bin:/opt/esp/tools/cmake/3.16.4/bin:/opt/esp/tools/openocd-esp32/v0.11.0-esp32-20220706/openocd-esp32/bin:/opt/esp/python_env/idf4.3_py3.8_env/bin:/opt/esp/idf/tools:$PATH
ENV GCC_TOOLS_BASE="/opt/esp/tools/xtensa-esp32-elf/esp-2021r2-patch3-8.4.0/xtensa-esp32-elf/bin/xtensa-esp32-elf-"
ENV IDF_PATH="/opt/esp/idf"
ENV IDF_PYTHON_ENV_PATH="/opt/esp/python_env/idf4.3_py3.8_env"
ENV IDF_TOOLS_EXPORT_CMD="/opt/esp/idf/export.sh"
@@ -109,7 +119,32 @@ ENV NODE_VERSION="8"
ENV OPENOCD_SCRIPTS="/opt/esp/tools/openocd-esp32/v0.10.0-esp32-20211111/openocd-esp32/share/openocd/scripts"
# Ccache is installed, enable it by default

# The constraint file has been downloaded and the right Python package versions installed. No need to check and
# download this at every invocation of the container.
ENV IDF_PYTHON_CHECK_CONSTRAINTS=no

# Ccache is installed, enable it by default
ENV IDF_CCACHE_ENABLE=1

# Install QEMU runtime dependencies
RUN : \
&& apt-get update && apt-get install -y -q \
libglib2.0-0 \
libpixman-1-0 \
&& rm -rf /var/lib/apt/lists/* \
&& :

# Install QEMU
ARG QEMU_VER=esp-develop-20220919
ARG QEMU_DIST=qemu-${QEMU_VER}.tar.bz2
ARG QEMU_SHA256=f6565d3f0d1e463a63a7f81aec94cce62df662bd42fc7606de4b4418ed55f870
RUN : \
&& wget --no-verbose https://github.com/espressif/qemu/releases/download/${QEMU_VER}/${QEMU_DIST} \
&& echo "${QEMU_SHA256} *${QEMU_DIST}" | sha256sum --check --strict - \
&& tar -xf ${QEMU_DIST} -C /opt \
&& rm ${QEMU_DIST} \
&& :

COPY docker/entrypoint.sh /opt/esp/entrypoint.sh
COPY components/wifi-manager/webapp/package.json /opt

@@ -142,6 +177,9 @@ RUN : \
&& node --version \
&& npm install -g \
&& :
RUN : \
&& npm install -g html-webpack-plugin


ENV NODE_PATH $NVM_DIR/v$NODE_VERSION/lib/node_modules
ENV PATH $IDF_PYTHON_ENV_PATH:$NVM_DIR/v$NODE_VERSION/bin:$PATH
@@ -152,5 +190,7 @@ RUN : \
&& chmod +x /usr/sbin/build_tools.py \
&& :


ENTRYPOINT [ "/opt/esp/entrypoint.sh" ]
CMD [ "/bin/bash" ]

README.md: 10 changes
@@ -83,13 +83,13 @@ NB: You can use the pre-build binaries Muse4MBFlash which has all the hardware I
- target: `muse`
- bat_config: `channel=5,scale=7.48,atten=3,cells=1`
- spi_config: `"mosi=15,miso=2,clk=14` *(this one is probably optional)*
- dac_config: `model=I2S,bck=5,ws=25,do=26,di=35,i2c=16,sda=18,scl=23,mck`
- dac_config: `model=I2S,bck=5,ws=25,do=26,di=35,i2c=16,sda=18,scl=23,mck=0`
- dac_controlset: `{"init":[ {"reg":0,"val":128}, {"reg":0,"val":0}, {"reg":25,"val":4}, {"reg":1,"val":80}, {"reg":2,"val":0}, {"reg":8,"val":0}, {"reg":4,"val":192}, {"reg":0,"val":18}, {"reg":1,"val":0}, {"reg":23,"val":24}, {"reg":24,"val":2}, {"reg":38,"val":9}, {"reg":39,"val":144}, {"reg":42,"val":144}, {"reg":43,"val":128}, {"reg":45,"val":128}, {"reg":27,"val":0}, {"reg":26,"val":0}, {"reg":2,"val":240}, {"reg":2,"val":0}, {"reg":29,"val":28}, {"reg":4,"val":48}, {"reg":25,"val":0}, {"reg":46,"val":33}, {"reg":47,"val":33} ]}`
- actrls_config: buttons
- define a "buttons" variable with: `[{"gpio":32, "pull":true, "debounce":10, "normal":{"pressed":"ACTRLS_VOLDOWN"}}, {"gpio":19, "pull":true, "debounce":40, "normal":{"pressed":"ACTRLS_VOLUP"}}, {"gpio":12, "pull":true, "debounce":40, "long_press":1000, "normal":{"pressed":"ACTRLS_TOGGLE"},"longpress":{"pressed":"ACTRLS_POWER"}}]`

### ESP32-A1S
Works with [ESP32-A1S](https://docs.ai-thinker.com/esp32-a1s) module that includes audio codec and headset output. You still need to use a demo board like [this](https://www.aliexpress.com/item/4001060963585.html) or an external amplifier if you want direct speaker connection. Note that there is a version with AC101 codec and another one with ES8388 with probably two variants - these boards are a mess (see below)
Works with [ESP32-A1S](https://docs.ai-thinker.com/esp32-a1s) module that includes audio codec and headset output. You still need to use a demo board like [this](https://aliexpress.com/item/4000130915903.html) or an external amplifier if you want direct speaker connection. Note that there is a version with AC101 codec and another one with ES8388 with probably two variants - these boards are a mess (see below)

The board shown above has the following IO set
- amplifier: GPIO21
@@ -174,9 +174,9 @@ Default and only "host" is 1 as others are used already by flash and spiram. The
### DAC/I2S
The NVS parameter "dac_config" set the gpio used for i2s communication with your DAC. You can define the defaults at compile time but nvs parameter takes precedence except for SqueezeAMP and A1S where these are forced at runtime. Syntax is
|
||||
```
bck=<gpio>,ws=<gpio>,do=<gpio>[,mck][,mute=<gpio>[:0|1][,model=TAS57xx|TAS5713|AC101|I2S][,sda=<gpio>,scl=gpio[,i2c=<addr>]]
bck=<gpio>,ws=<gpio>,do=<gpio>[,mck=0|1|2][,mute=<gpio>[:0|1][,model=TAS57xx|TAS5713|AC101|I2S][,sda=<gpio>,scl=<gpio>[,i2c=<addr>]]
```
if "model" is not set or is not recognized, then default "I2S" is used. The option "mck" is used for some codecs that require a master clock (although they should not). Only GPIO0 can be used as MCLK and be aware that this cannot coexit with RMII Ethernet (see ethernet section below). I2C parameters are optional and only needed if your DAC requires an I2C control (See 'dac_controlset' below). Note that "i2c" parameters are decimal, hex notation is not allowed.
|
||||
if "model" is not set or is not recognized, then default "I2S" is used. The option "mck" is used for some codecs that require a master clock (although they should not). By default GPIO0 is used as MCLK and only recent builds (none as or 2023/05/15) can use 1 or 2. Also be aware that this cannot coexit with RMII Ethernet (see ethernet section below). I2C parameters are optional and only needed if your DAC requires an I2C control (See 'dac_controlset' below). Note that "i2c" parameters are decimal, hex notation is not allowed.
So far, TAS57xx, TAS5713, AC101, WM8978 and ES8388 are recognized models where the proper init sequence/volume/power controls are sent. For other codecs that might require I2C commands, please use the parameter "dac_controlset" that allows definition of simple commands to be sent over I2C for init, power, speaker and headset on and off using a JSON syntax:
```json
@@ -305,7 +305,7 @@ See [set_GPIO](#set-gpio) for how to set the green and red LEDs. In addition, th
```
[green=0..100][,red=0..100]
```
NB: For well-known configuration, this is ignored
NB: For well-known configuration, GPIO affected to green and red LED cannot be changed but brightness option applies
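For instance, to dim the green LED to half brightness and the red LED to a quarter (values purely illustrative):

```
green=50,red=25
```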

### LED Strip
One LED strip with up to 255 addressable LEDs can be configured to offer enhanced visualizations. The LED strip can also be controlled remotely through the LMS server (using the CLI interface). Currently only WS2812B LEDs are supported. Set the LED Strip configuration (or NVS led_vu_config) to `WS2812,length=<n>,gpio=<gpio>`, where <n> is the number of leds in the strip (1..255), and <gpio> is the data pin.
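A hypothetical strip of 16 WS2812B LEDs fed from GPIO 22 would therefore use (values illustrative only):

```
WS2812,length=16,gpio=22
```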
@@ -216,6 +216,7 @@ CONFIG_TARGET=""
# I2S settings
#
CONFIG_I2S_NUM=0
CONFIG_I2S_MCK_IO=-1
CONFIG_I2S_BCK_IO=-1
CONFIG_I2S_WS_IO=-1
CONFIG_I2S_DO_IO=-1

@@ -217,6 +217,7 @@ CONFIG_TARGET="muse"
# I2S settings
#
CONFIG_I2S_NUM=0
CONFIG_I2S_MCK_IO=-1
CONFIG_I2S_BCK_IO=-1
CONFIG_I2S_WS_IO=-1
CONFIG_I2S_DO_IO=-1

@@ -209,6 +209,7 @@ CONFIG_TARGET="squeezeamp"
# I2S settings
#
CONFIG_I2S_NUM=0
CONFIG_I2S_MCK_IO=-1
CONFIG_I2S_BCK_IO=-1
CONFIG_I2S_WS_IO=-1
CONFIG_I2S_DO_IO=-1

@@ -1,21 +1,22 @@
set(lib_dir ${build_dir}/esp-idf)
if(IDF_TARGET STREQUAL esp32)
set(lib_dir ${build_dir}/esp-idf)
set(driver esp32/i2s.c esp32/i2s_hal.c)
string(REPLACE ".c" ".c.obj" driver_obj "${driver}")

set(driver i2s.c i2s_hal.c spi_bus_lock.c)
string(REPLACE ".c" ".c.obj" driver_obj "${driver}")
idf_component_register( SRCS ${driver}
REQUIRES driver
INCLUDE_DIRS ${IDF_PATH}/components/driver
PRIV_INCLUDE_DIRS ${IDF_PATH}/components/driver/include/driver
)

idf_component_register( SRCS ${driver}
REQUIRES driver
INCLUDE_DIRS ${IDF_PATH}/components/driver
PRIV_INCLUDE_DIRS ${IDF_PATH}/components/driver/include/driver
)

# CMake is just a pile of crap
message("!! overriding ${driver} !!")
message("CAREFUL, LIBRARIES STRIPPING FROM DUPLICATED COMPONENTS DEPENDS ON THIS BEING REBUILD")
# CMake is just a pile of crap
message(STATUS "!! overriding ${driver} !!")
message(STATUS "CAREFUL, LIBRARIES STRIPPING FROM DUPLICATED COMPONENTS DEPENDS ON THIS BEING REBUILD")

add_custom_command(
TARGET ${COMPONENT_LIB}
PRE_LINK
COMMAND xtensa-esp32-elf-ar -d ${lib_dir}/driver/libdriver.a ${driver_obj}
VERBATIM
)
add_custom_command(
TARGET ${COMPONENT_LIB}
PRE_LINK
COMMAND xtensa-esp32-elf-ar -d ${lib_dir}/driver/libdriver.a ${driver_obj}
VERBATIM
)
endif()

@@ -38,7 +38,8 @@
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_pm.h"
#include "esp_efuse.h"
#include "soc/chip_revision.h"
#include "hal/efuse_hal.h"
#include "esp_rom_gpio.h"

#include "sdkconfig.h"
@@ -193,7 +194,7 @@ static float i2s_apll_get_fi2s(int bits_per_sample, int sdm0, int sdm1, int sdm2

#if CONFIG_IDF_TARGET_ESP32
/* ESP32 rev0 silicon issue for APLL range/accuracy, please see ESP32 ECO document for more information on this */
if (esp_efuse_get_chip_ver() == 0) {
if (!ESP_CHIP_REV_ABOVE(efuse_hal_chip_revision(), 100)) {
sdm0 = 0;
sdm1 = 0;
}
@@ -1,849 +0,0 @@
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include <stdatomic.h>
#include "sdkconfig.h"
#include "spi_common_internal.h"
#include "esp_intr_alloc.h"
#include "soc/soc_caps.h"
#include "stdatomic.h"
#include "esp_log.h"
#include <strings.h>
#include "esp_heap_caps.h"

/*
 * This lock is designed to solve the conflicts between SPI devices (used in tasks) and
 * the background operations (ISR or cache access).
 *
 * There are N (device/task) + 1 (BG) acquiring processer candidates that may touch the bus.
 *
 * The core of the lock is a `status` atomic variable, which is always available. No intermediate
 * status is allowed. The atomic operations (mainly `atomic_fetch_and`, `atomic_fetch_or`)
 * atomically read the status, and bitwisely write status value ORed / ANDed with given masks.
 *
 * Definitions of the status:
 * - [30]    WEAK_BG_FLAG, active when the BG is the cache
 * - [29:20] LOCK bits, active when corresponding device is asking for acquiring
 * - [19:10] PENDING bits, active when the BG acknowledges the REQ bits, but hasn't fully handled them.
 * - [ 9: 0] REQ bits, active when corresponding device is requesting for BG operations.
 *
 * The REQ bits together PENDING bits are called BG bits, which represent the actual BG request
 * state of devices. Either one of REQ or PENDING being active indicates the device has pending BG
 * requests. Reason of having two bits instead of one is in the appendix below.
 *
 * Acquiring processer means the current processor (task or ISR) allowed to touch the critical
 * resources, or the SPI bus.
 *
 * States of the lock:
 * - STATE_IDLE: There's no acquiring processor. No device is acquiring the bus, and no BG
 *   operation is in progress.
 *
 * - STATE_ACQ: The acquiring processor is a device task. This means one of the devices is
 *   acquiring the bus.
 *
 * - STATE_BG: The acquiring processor is the ISR, and there is no acquiring device.
 *
 * - STATE_BG_ACQ: The acquiring processor is the ISR, and there is an acquiring device.
 *
 *
 * Whenever a bit is written to the status, it means the a device on a task is trying to acquire
 * the lock (either for the task, or the ISR). When there is no LOCK bits or BG bits active, the
 * caller immediately become the acquiring processor. Otherwise, the task has to block, and the ISR
 * will not be invoked until scheduled by the current acquiring processor.
 *
 * The acquiring processor is responsible to assign the next acquiring processor by calling the
 * scheduler, usually after it finishes some requests, and cleared the corresponding status bit.
 * But there is one exception, when the last bit is cleared from the status, after which there is
 * no other LOCK bits or BG bits active, the acquiring processor lost its role immediately, and
 * don't need to call the scheduler to assign the next acquiring processor.
 *
 * The acquiring processor may also choose to assign a new acquiring device when there is no, by
 * calling `spi_bus_lock_bg_rotate_acq_dev` in the ISR. But the acquiring processor, in this case,
 * is still the ISR, until it calls the scheduler.
 *
 *
 * Transition of the FSM:
 *
 * - STATE_IDLE: no acquiring device, nor acquiring processor, no LOCK or BG bits active
 *   -> STATE_BG: by `req_core`
 *   -> STATE_ACQ: by `acquire_core`
 *
 * - STATE_BG:
 *   * No acquiring device, the ISR is the acquiring processor, there is BG bits active, but no LOCK
 *     bits
 *   * The BG operation should be enabled while turning into this state.
 *
 *   -> STATE_IDLE: by `bg_exit_core` after `clear_pend_core` for all BG bits
 *   -> STATE_BG_ACQ: by `schedule_core`, when there is new LOCK bit set (by `acquire_core`)
 *
 * - STATE_BG_ACQ:
 *   * There is acquiring device, the ISR is the acquiring processor, there may be BG bits active for
 *     the acquiring device.
 *   * The BG operation should be enabled while turning into this state.
 *
 *   -> STATE_ACQ: by `bg_exit_core` after `clear_pend_core` for all BG bits for the acquiring
 *      device.
 *
 *      Should not go to the STATE_ACQ (unblock the acquiring task) until all requests of the
 *      acquiring device are finished. This is to preserve the sequence of foreground (polling) and
 *      background operations of the device. The background operations queued before the acquiring
 *      should be completed first.
 *
 * - STATE_ACQ:
 *   * There is acquiring device, the task is the acquiring processor, there is no BG bits active for
 *     the acquiring device.
 *   * The acquiring task (if blocked at `spi_bus_lock_acquire_start` or `spi_bus_lock_wait_bg_done`)
 *     should be resumed while turning into this state.
 *
 *   -> STATE_BG_ACQ: by `req_core`
 *   -> STATE_BG_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another
 *      device, and the new acquiring device has active BG bits.
 *   -> STATE_ACQ (other device): by `acquire_end_core`, when there is LOCK bit for another devices,
 *      but the new acquiring device has no active BG bits.
 *   -> STATE_BG: by `acquire_end_core` when there is no LOCK bit active, but there are active BG
 *      bits.
 *   -> STATE_IDLE: by `acquire_end_core` when there is no LOCK bit, nor BG bit active.
 *
 * The `req_core` used in the task is a little special. It asks for acquiring processor for the
 * ISR. When it succeed for the first time, it will invoke the ISR (hence passing the acquiring
 * role to the BG). Otherwise it will not block, the ISR will be automatically be invoked by other
 * acquiring processor. The caller of `req_core` will never become acquiring processor by this
 * function.
 *
 *
 * Appendix: The design, that having both request bit and pending bit, is to solve the
 * concurrency issue between tasks and the bg, when the task can queue several requests,
 * however the request bit cannot represent the number of requests queued.
 *
 * Here's the workflow of task and ISR work concurrently:
 * - Task: (a) Write to Queue -> (b) Write request bit
 *   The Task have to write request bit (b) after the data is prepared in the queue (a),
 *   otherwise the BG may fail to read from the queue when it sees the request bit set.
 *
 * - BG: (c) Read queue -> (d) Clear request bit
 *   Since the BG cannot know the number of requests queued, it have to repeatedly check the
 *   queue (c), until it find the data is empty, and then clear the request bit (d).
 *
 * The events are possible to happen in the order: (c) -> (a) -> (b) -> (d). This may cause a false
 * clear of the request bit. And there will be data prepared in the queue, but the request bit is
 * inactive.
 *
 * (e) move REQ bits to PEND bits, happen before (c) is introduced to solve this problem. In this
 * case (d) is changed to clear the PEND bit. Even if (e) -> (c) -> (a) -> (b) -> (d), only PEND
 * bit is cleared, while the REQ bit is still active.
 */
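/*
 * Illustration only: none of the following names exist in this driver. It simply restates the
 * four states described in the comment above as an enum, keyed on which context is currently
 * the "acquiring processor" and whether a device currently owns the bus.
 */
typedef enum {
    STATE_IDLE,   /* no acquiring processor, no LOCK or BG bits active                 */
    STATE_ACQ,    /* a device task is the acquiring processor                          */
    STATE_BG,     /* the ISR is the acquiring processor, no acquiring device           */
    STATE_BG_ACQ, /* the ISR is the acquiring processor and a device is acquiring      */
} bus_lock_fsm_state_t;

static inline bus_lock_fsm_state_t bus_lock_fsm_state(bool isr_is_acquiring, bool has_acquiring_dev)
{
    if (isr_is_acquiring) {
        return has_acquiring_dev ? STATE_BG_ACQ : STATE_BG;
    }
    return has_acquiring_dev ? STATE_ACQ : STATE_IDLE;
}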

struct spi_bus_lock_dev_t;
typedef struct spi_bus_lock_dev_t spi_bus_lock_dev_t;

typedef struct spi_bus_lock_t spi_bus_lock_t;


#define MAX_DEV_NUM 10

// Bit 29-20: lock bits, Bit 19-10: pending bits
// Bit 9-0: request bits, Bit 30:
#define LOCK_SHIFT 20
#define PENDING_SHIFT 10
#define REQ_SHIFT 0

#define WEAK_BG_FLAG BIT(30) /**< The bus is permanently requested by background operations.
 * This flag is weak, will not prevent acquiring of devices. But will help the BG to be re-enabled again after the bus is release.
 */

// get the bit mask wher bit [high-1, low] are all 1'b1 s.
#define BIT1_MASK(high, low) ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))

#define LOCK_BIT(mask) ((mask) << LOCK_SHIFT)
#define REQUEST_BIT(mask) ((mask) << REQ_SHIFT)
#define PENDING_BIT(mask) ((mask) << PENDING_SHIFT)
#define DEV_MASK(id) (LOCK_BIT(1<<id) | PENDING_BIT(1<<id) | REQUEST_BIT(1<<id))
#define ID_DEV_MASK(mask) (ffs(mask) - 1)

#define REQ_MASK BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM, REQ_SHIFT)
#define PEND_MASK BIT1_MASK(PENDING_SHIFT+MAX_DEV_NUM, PENDING_SHIFT)
#define BG_MASK BIT1_MASK(REQ_SHIFT+MAX_DEV_NUM*2, REQ_SHIFT)
#define LOCK_MASK BIT1_MASK(LOCK_SHIFT+MAX_DEV_NUM, LOCK_SHIFT)

#define DEV_REQ_MASK(dev) ((dev)->mask & REQ_MASK)
#define DEV_PEND_MASK(dev) ((dev)->mask & PEND_MASK)
#define DEV_BG_MASK(dev) ((dev)->mask & BG_MASK)
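/*
 * Standalone sketch (not part of spi_bus_lock.c): it duplicates the mask macros above in a
 * tiny host program so the 30-bit status layout becomes visible for a hypothetical device in
 * slot 3. Compile with any C compiler, e.g. `cc mask_demo.c && ./a.out` (file name invented).
 */
#include <stdio.h>
#include <stdint.h>
#include <strings.h> /* ffs() */

#define MAX_DEV_NUM   10
#define LOCK_SHIFT    20
#define PENDING_SHIFT 10
#define REQ_SHIFT     0
#define BIT1_MASK(high, low) ((UINT32_MAX << (high)) ^ (UINT32_MAX << (low)))
#define LOCK_BIT(mask)       ((mask) << LOCK_SHIFT)
#define REQUEST_BIT(mask)    ((mask) << REQ_SHIFT)
#define PENDING_BIT(mask)    ((mask) << PENDING_SHIFT)
#define DEV_MASK(id)         (LOCK_BIT(1 << id) | PENDING_BIT(1 << id) | REQUEST_BIT(1 << id))
#define ID_DEV_MASK(mask)    (ffs(mask) - 1)

int main(void)
{
    int id = 3; /* hypothetical device slot */
    printf("REQ  field [9:0]   = 0x%08x\n", (unsigned)BIT1_MASK(REQ_SHIFT + MAX_DEV_NUM, REQ_SHIFT));
    printf("PEND field [19:10] = 0x%08x\n", (unsigned)BIT1_MASK(PENDING_SHIFT + MAX_DEV_NUM, PENDING_SHIFT));
    printf("LOCK field [29:20] = 0x%08x\n", (unsigned)BIT1_MASK(LOCK_SHIFT + MAX_DEV_NUM, LOCK_SHIFT));
    printf("DEV_MASK(%d)        = 0x%08x\n", id, (unsigned)DEV_MASK(id));
    printf("slot recovered from its REQ bit = %d\n", ID_DEV_MASK(REQUEST_BIT(1 << id)));
    return 0;
}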

struct spi_bus_lock_t {
/**
 * The core of the lock. These bits are status of the lock, which should be always available.
 * No intermediate status is allowed. This is realized by atomic operations, mainly
 * `atomic_fetch_and`, `atomic_fetch_or`, which atomically read the status, and bitwise write
 * status value ORed / ANDed with given masks.
 *
 * The request bits together pending bits represent the actual bg request state of one device.
 * Either one of them being active indicates the device has pending bg requests.
 *
 * Whenever a bit is written to the status, it means the a device on a task is trying to
 * acquire the lock. But this will succeed only when no LOCK or BG bits active.
 *
 * The acquiring processor is responsible to call the scheduler to pass its role to other tasks
 * or the BG, unless it clear the last bit in the status register.
 */
//// Critical resources, they are only writable by acquiring processor, and stable only when read by the acquiring processor.
atomic_uint_fast32_t status;
spi_bus_lock_dev_t* volatile acquiring_dev; ///< The acquiring device
bool volatile acq_dev_bg_active; ///< BG is the acquiring processor serving the acquiring device, used for the wait_bg to skip waiting quickly.
bool volatile in_isr; ///< ISR is touching HW
//// End of critical resources

atomic_intptr_t dev[DEV_NUM_MAX]; ///< Child locks.
bg_ctrl_func_t bg_enable; ///< Function to enable background operations.
bg_ctrl_func_t bg_disable; ///< Function to disable background operations
void* bg_arg; ///< Argument for `bg_enable` and `bg_disable` functions.

spi_bus_lock_dev_t* last_dev; ///< Last used device, to decide whether to refresh all registers.
int periph_cs_num; ///< Number of the CS pins the HW has.

//debug information
int host_id; ///< Host ID, for debug information printing
uint32_t new_req; ///< Last int_req when `spi_bus_lock_bg_start` is called. Debug use.
};

struct spi_bus_lock_dev_t {
SemaphoreHandle_t semphr; ///< Binray semaphore to notify the device it claimed the bus
spi_bus_lock_t* parent; ///< Pointer to parent spi_bus_lock_t
uint32_t mask; ///< Bitwise OR-ed mask of the REQ, PEND, LOCK bits of this device
};

portMUX_TYPE s_spinlock = portMUX_INITIALIZER_UNLOCKED;

DRAM_ATTR static const char TAG[] = "bus_lock";

#define LOCK_CHECK(a, str, ret_val, ...) \
if (!(a)) { \
ESP_LOGE(TAG,"%s(%d): "str, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
return (ret_val); \
}

static inline int mask_get_id(uint32_t mask);
static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock);

/*******************************************************************************
 * atomic operations to the status
 ******************************************************************************/
SPI_MASTER_ISR_ATTR static inline uint32_t lock_status_fetch_set(spi_bus_lock_t *lock, uint32_t set)
{
return atomic_fetch_or(&lock->status, set);
}

IRAM_ATTR static inline uint32_t lock_status_fetch_clear(spi_bus_lock_t *lock, uint32_t clear)
{
return atomic_fetch_and(&lock->status, ~clear);
}

IRAM_ATTR static inline uint32_t lock_status_fetch(spi_bus_lock_t *lock)
{
return atomic_load(&lock->status);
}

SPI_MASTER_ISR_ATTR static inline void lock_status_init(spi_bus_lock_t *lock)
{
atomic_store(&lock->status, 0);
}

// return the remaining status bits
IRAM_ATTR static inline uint32_t lock_status_clear(spi_bus_lock_t* lock, uint32_t clear)
{
//the fetch and clear should be atomic, avoid missing the all '0' status when all bits are clear.
uint32_t state = lock_status_fetch_clear(lock, clear);
return state & (~clear);
}

/*******************************************************************************
 * Schedule service
 *
 * The modification to the status bits may cause rotating of the acquiring processor. It also have
 * effects to `acquired_dev` (the acquiring device), `in_isr` (HW used in BG), and
 * `acq_dev_bg_active` (wait_bg_end can be skipped) members of the lock structure.
 *
 * Most of them should be atomic, and special attention should be paid to the operation
 * sequence.
 ******************************************************************************/
SPI_MASTER_ISR_ATTR static inline void resume_dev_in_isr(spi_bus_lock_dev_t *dev_lock, BaseType_t *do_yield)
{
xSemaphoreGiveFromISR(dev_lock->semphr, do_yield);
}

IRAM_ATTR static inline void resume_dev(const spi_bus_lock_dev_t *dev_lock)
{
xSemaphoreGive(dev_lock->semphr);
}

SPI_MASTER_ISR_ATTR static inline void bg_disable(spi_bus_lock_t *lock)
{
BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_disable);
lock->bg_disable(lock->bg_arg);
}

IRAM_ATTR static inline void bg_enable(spi_bus_lock_t* lock)
{
BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->bg_enable);
lock->bg_enable(lock->bg_arg);
}

// Set the REQ bit. If we become the acquiring processor, invoke the ISR and pass that to it.
// The caller will never become the acquiring processor after this function returns.
SPI_MASTER_ATTR static inline void req_core(spi_bus_lock_dev_t *dev_handle)
{
spi_bus_lock_t *lock = dev_handle->parent;

// Though `acquired_dev` is critical resource, `dev_handle == lock->acquired_dev`
// is a stable statement unless `acquire_start` or `acquire_end` is called by current
// device.
if (dev_handle == lock->acquiring_dev){
// Set the REQ bit and check BG bits if we are the acquiring processor.
// If the BG bits were not active before, invoke the BG again.

// Avoid competitive risk against the `clear_pend_core`, `acq_dev_bg_active` should be set before
// setting REQ bit.
lock->acq_dev_bg_active = true;
uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
if ((status & DEV_BG_MASK(dev_handle)) == 0) {
bg_enable(lock); //acquiring processor passed to BG
}
} else {
uint32_t status = lock_status_fetch_set(lock, DEV_REQ_MASK(dev_handle));
if (status == 0) {
bg_enable(lock); //acquiring processor passed to BG
}
}
}

//Set the LOCK bit. Handle related stuff and return true if we become the acquiring processor.
SPI_MASTER_ISR_ATTR static inline bool acquire_core(spi_bus_lock_dev_t *dev_handle)
{
spi_bus_lock_t* lock = dev_handle->parent;
portENTER_CRITICAL_SAFE(&s_spinlock);
uint32_t status = lock_status_fetch_set(lock, dev_handle->mask & LOCK_MASK);
portEXIT_CRITICAL_SAFE(&s_spinlock);

// Check all bits except WEAK_BG
if ((status & (BG_MASK | LOCK_MASK)) == 0) {
//succeed at once
lock->acquiring_dev = dev_handle;
BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
if (status & WEAK_BG_FLAG) {
//Mainly to disable the cache (Weak_BG), that is not able to disable itself
bg_disable(lock);
}
return true;
}
return false;
}

/**
 * Find the next acquiring processor according to the status. Will directly change
 * the acquiring device if new one found.
 *
 * Cases:
 * - BG should still be the acquiring processor (Return false):
 *     1. Acquiring device has active BG bits: out_desired_dev = new acquiring device
 *     2. No acquiring device, but BG active: out_desired_dev = randomly pick one device with active BG bits
 * - BG should yield to the task (Return true):
 *     3. Acquiring device has no active BG bits: out_desired_dev = new acquiring device
 *     4. No acquiring device while no active BG bits: out_desired_dev=NULL
 *
 * Acquiring device task need to be resumed only when case 3.
 *
 * This scheduling can happen in either task or ISR, so `in_isr` or `bg_active` not touched.
 *
 * @param lock
 * @param status Current status
 * @param out_desired_dev Desired device to work next, see above.
 *
 * @return False if BG should still be the acquiring processor, otherwise True (yield to task).
 */
IRAM_ATTR static inline bool
schedule_core(spi_bus_lock_t *lock, uint32_t status, spi_bus_lock_dev_t **out_desired_dev)
{
spi_bus_lock_dev_t* desired_dev = NULL;
uint32_t lock_bits = (status & LOCK_MASK) >> LOCK_SHIFT;
uint32_t bg_bits = status & BG_MASK;
bg_bits = ((bg_bits >> REQ_SHIFT) | (bg_bits >> PENDING_SHIFT)) & REQ_MASK;

bool bg_yield;
if (lock_bits) {
int dev_id = mask_get_id(lock_bits);
desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

lock->acquiring_dev = desired_dev;
bg_yield = ((bg_bits & desired_dev->mask) == 0);
lock->acq_dev_bg_active = !bg_yield;
} else {
lock->acq_dev_bg_active = false;
if (bg_bits) {
int dev_id = mask_get_id(bg_bits);
desired_dev = (spi_bus_lock_dev_t *)atomic_load(&lock->dev[dev_id]);
BUS_LOCK_DEBUG_EXECUTE_CHECK(desired_dev);

lock->acquiring_dev = NULL;
bg_yield = false;
} else {
desired_dev = NULL;
lock->acquiring_dev = NULL;
bg_yield = true;
}
}
*out_desired_dev = desired_dev;
return bg_yield;
}

//Clear the LOCK bit and trigger a rescheduling.
IRAM_ATTR static inline void acquire_end_core(spi_bus_lock_dev_t *dev_handle)
{
spi_bus_lock_t* lock = dev_handle->parent;
//uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
spi_bus_lock_dev_t* desired_dev = NULL;

portENTER_CRITICAL_SAFE(&s_spinlock);
uint32_t status = lock_status_clear(lock, dev_handle->mask & LOCK_MASK);
bool invoke_bg = !schedule_core(lock, status, &desired_dev);
portEXIT_CRITICAL_SAFE(&s_spinlock);

if (invoke_bg) {
bg_enable(lock);
} else if (desired_dev) {
resume_dev(desired_dev);
} else if (status & WEAK_BG_FLAG) {
bg_enable(lock);
}
}

// Move the REQ bits to corresponding PEND bits. Must be called by acquiring processor.
// Have no side effects on the acquiring device/processor.
SPI_MASTER_ISR_ATTR static inline void update_pend_core(spi_bus_lock_t *lock, uint32_t status)
{
uint32_t active_req_bits = status & REQ_MASK;
#if PENDING_SHIFT > REQ_SHIFT
uint32_t pending_mask = active_req_bits << (PENDING_SHIFT - REQ_SHIFT);
#else
uint32_t pending_mask = active_req_bits >> (REQ_SHIFT - PENDING_SHIFT);
#endif
// We have to set the PEND bits and then clear the REQ bits, since BG bits are using bitwise OR logic,
// this will not influence the effectiveness of the BG bits of every device.
lock_status_fetch_set(lock, pending_mask);
lock_status_fetch_clear(lock, active_req_bits);
}
|
||||
|
||||
// Clear the PEND bit (not REQ bit!) of a device, return the suggestion whether we can try to quit the ISR.
|
||||
// Lost the acquiring processor immediately when the BG bits for active device are inactive, indiciating by the return value.
|
||||
// Can be called only when ISR is acting as the acquiring processor.
|
||||
SPI_MASTER_ISR_ATTR static inline bool clear_pend_core(spi_bus_lock_dev_t *dev_handle)
|
||||
{
|
||||
bool finished;
|
||||
spi_bus_lock_t *lock = dev_handle->parent;
|
||||
uint32_t pend_mask = DEV_PEND_MASK(dev_handle);
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(lock_status_fetch(lock) & pend_mask);
|
||||
|
||||
uint32_t status = lock_status_clear(lock, pend_mask);
|
||||
|
||||
if (lock->acquiring_dev == dev_handle) {
|
||||
finished = ((status & DEV_REQ_MASK(dev_handle)) == 0);
|
||||
if (finished) {
|
||||
lock->acq_dev_bg_active = false;
|
||||
}
|
||||
} else {
|
||||
finished = (status == 0);
|
||||
}
|
||||
return finished;
|
||||
}
|
||||
|
||||
// Return true if the ISR has already touched the HW, which means previous operations should
|
||||
// be terminated first, before we use the HW again. Otherwise return false.
|
||||
// In either case `in_isr` will be marked as true, until call to `bg_exit_core` with `wip=false` successfully.
|
||||
SPI_MASTER_ISR_ATTR static inline bool bg_entry_core(spi_bus_lock_t *lock)
|
||||
{
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
|
||||
/*
|
||||
* The interrupt is disabled at the entry of ISR to avoid competitive risk as below:
|
||||
*
|
||||
* The `esp_intr_enable` will be called (b) after new BG request is queued (a) in the task;
|
||||
* while `esp_intr_disable` should be called (c) if we check and found the sending queue is empty (d).
|
||||
* If (c) happens after (d), if things happens in this sequence:
|
||||
* (d) -> (a) -> (b) -> (c), the interrupt will be disabled while there's pending BG request in the queue.
|
||||
*
|
||||
* To avoid this, interrupt is disabled here, and re-enabled later if required. (c) -> (d) -> (a) -> (b) -> revert (c) if !d
|
||||
*/
|
||||
bg_disable(lock);
|
||||
if (lock->in_isr) {
|
||||
return false;
|
||||
} else {
|
||||
lock->in_isr = true;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Handle the conditions of status and interrupt, avoiding the ISR being disabled when there is any new coming BG requests.
|
||||
// When called with `wip=true`, means the ISR is performing some operations. Will enable the interrupt again and exit unconditionally.
|
||||
// When called with `wip=false`, will only return `true` when there is no coming BG request. If return value is `false`, the ISR should try again.
|
||||
// Will not change acquiring device.
|
||||
SPI_MASTER_ISR_ATTR static inline bool bg_exit_core(spi_bus_lock_t *lock, bool wip, BaseType_t *do_yield)
|
||||
{
|
||||
//See comments in `bg_entry_core`, re-enable interrupt disabled in entry if we do need the interrupt
|
||||
if (wip) {
|
||||
bg_enable(lock);
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev || lock->acq_dev_bg_active);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ret;
|
||||
uint32_t status = lock_status_fetch(lock);
|
||||
if (lock->acquiring_dev) {
|
||||
if (status & DEV_BG_MASK(lock->acquiring_dev)) {
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acq_dev_bg_active);
|
||||
ret = false;
|
||||
} else {
|
||||
// The request may happen any time, even after we fetched the status.
|
||||
// The value of `acq_dev_bg_active` is random.
|
||||
resume_dev_in_isr(lock->acquiring_dev, do_yield);
|
||||
ret = true;
|
||||
}
|
||||
} else {
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
|
||||
ret = !(status & BG_MASK);
|
||||
}
|
||||
if (ret) {
|
||||
//when successfully exit, but no transaction done, mark BG as inactive
|
||||
lock->in_isr = false;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
IRAM_ATTR static inline void dev_wait_prepare(spi_bus_lock_dev_t *dev_handle)
|
||||
{
|
||||
xSemaphoreTake(dev_handle->semphr, 0);
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR static inline esp_err_t dev_wait(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
|
||||
{
|
||||
BaseType_t ret = xSemaphoreTake(dev_handle->semphr, wait);
|
||||
|
||||
if (ret == pdFALSE) return ESP_ERR_TIMEOUT;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Initialization & Deinitialization
|
||||
******************************************************************************/
|
||||
esp_err_t spi_bus_init_lock(spi_bus_lock_handle_t *out_lock, const spi_bus_lock_config_t *config)
|
||||
{
|
||||
spi_bus_lock_t* lock = (spi_bus_lock_t*)calloc(sizeof(spi_bus_lock_t), 1);
|
||||
if (lock == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
|
||||
lock_status_init(lock);
|
||||
lock->acquiring_dev = NULL;
|
||||
lock->last_dev = NULL;
|
||||
lock->periph_cs_num = config->cs_num;
|
||||
lock->host_id = config->host_id;
|
||||
|
||||
*out_lock = lock;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
void spi_bus_deinit_lock(spi_bus_lock_handle_t lock)
|
||||
{
|
||||
for (int i = 0; i < DEV_NUM_MAX; i++) {
|
||||
assert(atomic_load(&lock->dev[i]) == (intptr_t)NULL);
|
||||
}
|
||||
free(lock);
|
||||
}
|
||||
|
||||
static int try_acquire_free_dev(spi_bus_lock_t *lock, bool cs_required)
|
||||
{
|
||||
if (cs_required) {
|
||||
int i;
|
||||
for (i = 0; i < lock->periph_cs_num; i++) {
|
||||
intptr_t null = (intptr_t) NULL;
|
||||
//use 1 to occupy the slot, actual setup comes later
|
||||
if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ((i == lock->periph_cs_num)? -1: i);
|
||||
} else {
|
||||
int i;
|
||||
for (i = DEV_NUM_MAX - 1; i >= 0; i--) {
|
||||
intptr_t null = (intptr_t) NULL;
|
||||
//use 1 to occupy the slot, actual setup comes later
|
||||
if (atomic_compare_exchange_strong(&lock->dev[i], &null, (intptr_t) 1)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
esp_err_t spi_bus_lock_register_dev(spi_bus_lock_handle_t lock, spi_bus_lock_dev_config_t *config,
|
||||
spi_bus_lock_dev_handle_t *out_dev_handle)
|
||||
{
|
||||
if (lock == NULL) return ESP_ERR_INVALID_ARG;
|
||||
int id = try_acquire_free_dev(lock, config->flags & SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED);
|
||||
if (id == -1) return ESP_ERR_NOT_SUPPORTED;
|
||||
|
||||
spi_bus_lock_dev_t* dev_lock = (spi_bus_lock_dev_t*)heap_caps_calloc(sizeof(spi_bus_lock_dev_t), 1, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
|
||||
if (dev_lock == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
dev_lock->semphr = xSemaphoreCreateBinary();
|
||||
if (dev_lock->semphr == NULL) {
|
||||
free(dev_lock);
|
||||
atomic_store(&lock->dev[id], (intptr_t)NULL);
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
dev_lock->parent = lock;
|
||||
dev_lock->mask = DEV_MASK(id);
|
||||
|
||||
ESP_LOGV(TAG, "device registered on bus %d slot %d.", lock->host_id, id);
|
||||
atomic_store(&lock->dev[id], (intptr_t)dev_lock);
|
||||
*out_dev_handle = dev_lock;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
void spi_bus_lock_unregister_dev(spi_bus_lock_dev_handle_t dev_handle)
|
||||
{
|
||||
int id = dev_lock_get_id(dev_handle);
|
||||
|
||||
spi_bus_lock_t* lock = dev_handle->parent;
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(atomic_load(&lock->dev[id]) == (intptr_t)dev_handle);
|
||||
|
||||
if (lock->last_dev == dev_handle) lock->last_dev = NULL;
|
||||
|
||||
atomic_store(&lock->dev[id], (intptr_t)NULL);
|
||||
if (dev_handle->semphr) {
|
||||
vSemaphoreDelete(dev_handle->semphr);
|
||||
}
|
||||
|
||||
free(dev_handle);
|
||||
}
|
||||
|
||||
IRAM_ATTR static inline int mask_get_id(uint32_t mask)
|
||||
{
|
||||
return ID_DEV_MASK(mask);
|
||||
}
|
||||
|
||||
IRAM_ATTR static inline int dev_lock_get_id(spi_bus_lock_dev_t *dev_lock)
|
||||
{
|
||||
return mask_get_id(dev_lock->mask);
|
||||
}
|
||||
|
||||
void spi_bus_lock_set_bg_control(spi_bus_lock_handle_t lock, bg_ctrl_func_t bg_enable, bg_ctrl_func_t bg_disable, void *arg)
|
||||
{
|
||||
lock->bg_enable = bg_enable;
|
||||
lock->bg_disable = bg_disable;
|
||||
lock->bg_arg = arg;
|
||||
}
|
||||
|
||||
IRAM_ATTR int spi_bus_lock_get_dev_id(spi_bus_lock_dev_handle_t dev_handle)
|
||||
{
|
||||
return (dev_handle? dev_lock_get_id(dev_handle): -1);
|
||||
}
|
||||
|
||||
//will be called when the cache is disabled
|
||||
IRAM_ATTR bool spi_bus_lock_touch(spi_bus_lock_dev_handle_t dev_handle)
|
||||
{
|
||||
spi_bus_lock_dev_t* last_dev = dev_handle->parent->last_dev;
|
||||
dev_handle->parent->last_dev = dev_handle;
|
||||
if (last_dev != dev_handle) {
|
||||
int last_dev_id = (last_dev? dev_lock_get_id(last_dev): -1);
|
||||
ESP_DRAM_LOGV(TAG, "SPI dev changed from %d to %d",
|
||||
last_dev_id, dev_lock_get_id(dev_handle));
|
||||
}
|
||||
return (dev_handle != last_dev);
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Acquiring service
|
||||
******************************************************************************/
|
||||
IRAM_ATTR esp_err_t spi_bus_lock_acquire_start(spi_bus_lock_dev_t *dev_handle, TickType_t wait)
|
||||
{
|
||||
LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);
|
||||
|
||||
spi_bus_lock_t* lock = dev_handle->parent;
|
||||
|
||||
// Clear the semaphore before checking
|
||||
dev_wait_prepare(dev_handle);
|
||||
if (!acquire_core(dev_handle)) {
|
||||
//block until we become the acquiring processor (helped by the previous acquiring processor)
|
||||
esp_err_t err = dev_wait(dev_handle, wait);
|
||||
//TODO: add timeout handling here.
|
||||
if (err != ESP_OK) return err;
|
||||
}
|
||||
|
||||
ESP_DRAM_LOGV(TAG, "dev %d acquired.", dev_lock_get_id(dev_handle));
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(lock->acquiring_dev == dev_handle);
|
||||
|
||||
//When we arrive here, requests of this device should already have been handled
|
||||
uint32_t status = lock_status_fetch(lock);
|
||||
(void) status;
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK((status & DEV_BG_MASK(dev_handle)) == 0);
|
||||
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
IRAM_ATTR esp_err_t spi_bus_lock_acquire_end(spi_bus_lock_dev_t *dev_handle)
|
||||
{
|
||||
//release the bus
|
||||
spi_bus_lock_t* lock = dev_handle->parent;
|
||||
LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot release a lock that hasn't been acquired.", ESP_ERR_INVALID_STATE);
|
||||
|
||||
acquire_end_core(dev_handle);
|
||||
|
||||
ESP_LOGV(TAG, "dev %d released.", dev_lock_get_id(dev_handle));
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR spi_bus_lock_dev_handle_t spi_bus_lock_get_acquiring_dev(spi_bus_lock_t *lock)
|
||||
{
|
||||
return lock->acquiring_dev;
|
||||
}
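As a usage sketch of the acquiring service, assuming `bus_lock` is the handle created in the earlier init sketch; the device flags and the work done while holding the bus are illustrative only.

spi_bus_lock_dev_handle_t dev = NULL;
spi_bus_lock_dev_config_t dev_cfg = { .flags = SPI_BUS_LOCK_DEV_FLAG_CS_REQUIRED };
ESP_ERROR_CHECK(spi_bus_lock_register_dev(bus_lock, &dev_cfg, &dev));

// Become the exclusive user of the bus (only portMAX_DELAY is supported as timeout)
ESP_ERROR_CHECK(spi_bus_lock_acquire_start(dev, portMAX_DELAY));
// ... perform polling transactions while holding the bus ...
ESP_ERROR_CHECK(spi_bus_lock_acquire_end(dev));

spi_bus_lock_unregister_dev(dev);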
|
||||
|
||||
/*******************************************************************************
|
||||
* BG (background operation) service
|
||||
******************************************************************************/
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_entry(spi_bus_lock_t* lock)
|
||||
{
|
||||
return bg_entry_core(lock);
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_exit(spi_bus_lock_t* lock, bool wip, BaseType_t* do_yield)
|
||||
{
|
||||
return bg_exit_core(lock, wip, do_yield);
|
||||
}
|
||||
|
||||
SPI_MASTER_ATTR esp_err_t spi_bus_lock_bg_request(spi_bus_lock_dev_t *dev_handle)
|
||||
{
|
||||
req_core(dev_handle);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
IRAM_ATTR esp_err_t spi_bus_lock_wait_bg_done(spi_bus_lock_dev_handle_t dev_handle, TickType_t wait)
|
||||
{
|
||||
spi_bus_lock_t *lock = dev_handle->parent;
|
||||
LOCK_CHECK(lock->acquiring_dev == dev_handle, "Cannot wait for a device that is not acquired", ESP_ERR_INVALID_STATE);
|
||||
LOCK_CHECK(wait == portMAX_DELAY, "timeout other than portMAX_DELAY not supported", ESP_ERR_INVALID_ARG);
|
||||
|
||||
// If no BG bits are active, skip quickly. This relies on the fact that `spi_bus_lock_wait_bg_done`
// cannot be executed concurrently with `bg_request` on the same device.
|
||||
if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
|
||||
// Clear the semaphore before checking
|
||||
dev_wait_prepare(dev_handle);
|
||||
if (lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) {
|
||||
//block until we become the acquiring processor (helped by the previous acquiring processor)
|
||||
esp_err_t err = dev_wait(dev_handle, wait);
|
||||
//TODO: add timeout handling here.
|
||||
if (err != ESP_OK) return err;
|
||||
}
|
||||
}
|
||||
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acq_dev_bg_active);
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK((lock_status_fetch(lock) & DEV_BG_MASK(dev_handle)) == 0);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_clear_req(spi_bus_lock_dev_t *dev_handle)
|
||||
{
|
||||
bool finished = clear_pend_core(dev_handle);
|
||||
ESP_EARLY_LOGV(TAG, "dev %d served from bg.", dev_lock_get_id(dev_handle));
|
||||
return finished;
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_acq(spi_bus_lock_t *lock,
|
||||
spi_bus_lock_dev_handle_t *out_dev_lock)
|
||||
{
|
||||
BUS_LOCK_DEBUG_EXECUTE_CHECK(!lock->acquiring_dev);
|
||||
uint32_t status = lock_status_fetch(lock);
|
||||
return schedule_core(lock, status, out_dev_lock);
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_check_dev_req(spi_bus_lock_dev_t *dev_lock)
|
||||
{
|
||||
spi_bus_lock_t* lock = dev_lock->parent;
|
||||
uint32_t status = lock_status_fetch(lock);
|
||||
uint32_t dev_status = status & dev_lock->mask;
|
||||
|
||||
// Move the REQ bits of all devices to the corresponding PEND bits.
// To reduce execution time, this is only done when the REQ bit of the calling device is set.
|
||||
if (dev_status & REQ_MASK) {
|
||||
update_pend_core(lock, status);
|
||||
return true;
|
||||
} else {
|
||||
return dev_status & PEND_MASK;
|
||||
}
|
||||
}
|
||||
|
||||
SPI_MASTER_ISR_ATTR bool spi_bus_lock_bg_req_exist(spi_bus_lock_t *lock)
|
||||
{
|
||||
uint32_t status = lock_status_fetch(lock);
|
||||
return status & BG_MASK;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
* Static variables of the locks of the main flash
|
||||
******************************************************************************/
|
||||
#if CONFIG_SPI_FLASH_SHARE_SPI1_BUS
|
||||
static spi_bus_lock_dev_t lock_main_flash_dev;
|
||||
|
||||
static spi_bus_lock_t main_spi_bus_lock = {
|
||||
/*
|
||||
* The main bus cache is permanently required; this flag is set here and never cleared so that the
* cache will always be enabled if acquiring devices yield.
|
||||
*/
|
||||
.status = ATOMIC_VAR_INIT(WEAK_BG_FLAG),
|
||||
.acquiring_dev = NULL,
|
||||
.dev = {ATOMIC_VAR_INIT((intptr_t)&lock_main_flash_dev)},
|
||||
.new_req = 0,
|
||||
.periph_cs_num = SOC_SPI_PERIPH_CS_NUM(0),
|
||||
};
|
||||
const spi_bus_lock_handle_t g_main_spi_bus_lock = &main_spi_bus_lock;
|
||||
|
||||
esp_err_t spi_bus_lock_init_main_bus(void)
|
||||
{
|
||||
spi_bus_main_set_lock(g_main_spi_bus_lock);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static StaticSemaphore_t main_flash_semphr;
|
||||
|
||||
static spi_bus_lock_dev_t lock_main_flash_dev = {
|
||||
.semphr = NULL,
|
||||
.parent = &main_spi_bus_lock,
|
||||
.mask = DEV_MASK(0),
|
||||
};
|
||||
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = &lock_main_flash_dev;
|
||||
|
||||
esp_err_t spi_bus_lock_init_main_dev(void)
|
||||
{
|
||||
g_spi_lock_main_flash_dev->semphr = xSemaphoreCreateBinaryStatic(&main_flash_semphr);
|
||||
if (g_spi_lock_main_flash_dev->semphr == NULL) {
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
return ESP_OK;
|
||||
}
|
||||
#else //CONFIG_SPI_FLASH_SHARE_SPI1_BUS
|
||||
|
||||
//when the dev lock is not initialized, point to NULL
|
||||
const spi_bus_lock_dev_handle_t g_spi_lock_main_flash_dev = NULL;
|
||||
|
||||
#endif
|
||||
File diff suppressed because it is too large
@@ -1,6 +1,8 @@
idf_component_register( SRC_DIRS .
    INCLUDE_DIRS .
    PRIV_REQUIRES services bt display console tools platform_config
if(IDF_TARGET STREQUAL "esp32")
idf_component_register( SRC_DIRS .
    INCLUDE_DIRS .
    PRIV_REQUIRES services bt display console tools platform_config
)
endif()
||||
172
components/driver_bt/bt_app_core - Copy.c.old
Normal file
@@ -0,0 +1,172 @@
|
||||
/*
|
||||
This example code is in the Public Domain (or CC0 licensed, at your option.)
|
||||
|
||||
Unless required by applicable law or agreed to in writing, this
|
||||
software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||
CONDITIONS OF ANY KIND, either express or implied.
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#include "esp_log.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/queue.h"
|
||||
#include "freertos/task.h"
|
||||
#include "esp_bt.h"
|
||||
#include "esp_bt_main.h"
|
||||
#include "esp_gap_bt_api.h"
|
||||
#include "bt_app_core.h"
|
||||
#include "tools.h"
|
||||
|
||||
static const char *TAG = "btappcore";
|
||||
|
||||
static void bt_app_task_handler(void *arg);
|
||||
static bool bt_app_send_msg(bt_app_msg_t *msg);
|
||||
static void bt_app_work_dispatched(bt_app_msg_t *msg);
|
||||
|
||||
static xQueueHandle s_bt_app_task_queue;
|
||||
static bool running;
|
||||
|
||||
bool bt_app_work_dispatch(bt_app_cb_t p_cback, uint16_t event, void *p_params, int param_len, bt_app_copy_cb_t p_copy_cback)
|
||||
{
|
||||
ESP_LOGV(TAG,"%s event 0x%x, param len %d", __func__, event, param_len);
|
||||
|
||||
bt_app_msg_t msg;
|
||||
memset(&msg, 0, sizeof(bt_app_msg_t));
|
||||
|
||||
msg.sig = BT_APP_SIG_WORK_DISPATCH;
|
||||
msg.event = event;
|
||||
msg.cb = p_cback;
|
||||
|
||||
if (param_len == 0) {
|
||||
return bt_app_send_msg(&msg);
|
||||
} else if (p_params && param_len > 0) {
|
||||
if ((msg.param = clone_obj_psram(p_params, param_len)) != NULL) {
|
||||
/* check if caller has provided a copy callback to do the deep copy */
|
||||
if (p_copy_cback) {
|
||||
p_copy_cback(&msg, msg.param, p_params);
|
||||
}
|
||||
return bt_app_send_msg(&msg);
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
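To illustrate the dispatch path, a hedged sketch of posting an event with a payload. The handler, event code and payload type are made-up names; only the bt_app_work_dispatch() call itself comes from this file.

typedef struct { int volume; } my_payload_t;          // hypothetical payload

static void my_av_event_handler(uint16_t event, void *param)
{
    my_payload_t *p = (my_payload_t *)param;
    // ... react to the event using p->volume ...
}

static void send_volume_event(int volume)
{
    my_payload_t payload = { .volume = volume };
    // param_len > 0, so the payload is cloned before being queued to the BT app task
    bt_app_work_dispatch(my_av_event_handler, 0x01 /* hypothetical event id */,
                         &payload, sizeof(payload), NULL);
}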
|
||||
|
||||
static bool bt_app_send_msg(bt_app_msg_t *msg)
|
||||
{
|
||||
if (msg == NULL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (xQueueSend(s_bt_app_task_queue, msg, 10 / portTICK_RATE_MS) != pdTRUE) {
|
||||
ESP_LOGE(TAG,"%s xQueue send failed", __func__);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void bt_app_work_dispatched(bt_app_msg_t *msg)
|
||||
{
|
||||
if (msg->cb) {
|
||||
msg->cb(msg->event, msg->param);
|
||||
}
|
||||
}
|
||||
|
||||
static void bt_app_task_handler(void *arg)
|
||||
{
|
||||
bt_app_msg_t msg;
|
||||
esp_err_t err;
|
||||
|
||||
s_bt_app_task_queue = xQueueCreate(10, sizeof(bt_app_msg_t));
|
||||
|
||||
esp_bt_controller_mem_release(ESP_BT_MODE_BLE);
|
||||
esp_bt_controller_config_t bt_cfg = BT_CONTROLLER_INIT_CONFIG_DEFAULT();
|
||||
|
||||
if ((err = esp_bt_controller_init(&bt_cfg)) != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s initialize controller failed: %s\n", __func__, esp_err_to_name(err));
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if ((err = esp_bt_controller_enable(ESP_BT_MODE_CLASSIC_BT)) != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s enable controller failed: %s\n", __func__, esp_err_to_name(err));
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if ((err = esp_bluedroid_init()) != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s initialize bluedroid failed: %s\n", __func__, esp_err_to_name(err));
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if ((err = esp_bluedroid_enable()) != ESP_OK) {
|
||||
ESP_LOGE(TAG, "%s enable bluedroid failed: %s\n", __func__, esp_err_to_name(err));
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Bluetooth device name, connection mode and profile set up */
|
||||
bt_app_work_dispatch((bt_av_hdl_stack_evt_t*) arg, BT_APP_EVT_STACK_UP, NULL, 0, NULL);
|
||||
|
||||
#if (CONFIG_BT_SSP_ENABLED)
|
||||
/* Set default parameters for Secure Simple Pairing */
|
||||
esp_bt_sp_param_t param_type = ESP_BT_SP_IOCAP_MODE;
|
||||
esp_bt_io_cap_t iocap = ESP_BT_IO_CAP_IO;
|
||||
esp_bt_gap_set_security_param(param_type, &iocap, sizeof(uint8_t));
|
||||
#endif
|
||||
|
||||
running = true;
|
||||
|
||||
while (running) {
|
||||
if (pdTRUE == xQueueReceive(s_bt_app_task_queue, &msg, (portTickType)portMAX_DELAY)) {
|
||||
ESP_LOGV(TAG,"%s, sig 0x%x, 0x%x", __func__, msg.sig, msg.event);
|
||||
|
||||
switch (msg.sig) {
|
||||
case BT_APP_SIG_WORK_DISPATCH:
|
||||
bt_app_work_dispatched(&msg);
|
||||
break;
|
||||
default:
|
||||
ESP_LOGW(TAG,"%s, unhandled sig: %d", __func__, msg.sig);
|
||||
break;
|
||||
}
|
||||
|
||||
if (msg.param) {
|
||||
free(msg.param);
|
||||
}
|
||||
} else {
|
||||
ESP_LOGW(TAG,"No messaged received from queue.");
|
||||
}
|
||||
}
|
||||
|
||||
ESP_LOGD(TAG, "bt_app_task shutting down");
|
||||
|
||||
if (esp_bluedroid_disable() != ESP_OK) goto exit;
|
||||
// this disable has a sleep timer BTA_DISABLE_DELAY in bt_target.h and
|
||||
// if we don't wait for it then disable crashes... don't know why
|
||||
vTaskDelay(2*200 / portTICK_PERIOD_MS);
|
||||
|
||||
ESP_LOGD(TAG, "esp_bluedroid_disable called successfully");
|
||||
if (esp_bluedroid_deinit() != ESP_OK) goto exit;
|
||||
|
||||
ESP_LOGD(TAG, "esp_bluedroid_deinit called successfully");
|
||||
if (esp_bt_controller_disable() != ESP_OK) goto exit;
|
||||
|
||||
ESP_LOGD(TAG, "esp_bt_controller_disable called successfully");
|
||||
if (esp_bt_controller_deinit() != ESP_OK) goto exit;
|
||||
|
||||
ESP_LOGD(TAG, "bt stopped successfully");
|
||||
|
||||
exit:
|
||||
vQueueDelete(s_bt_app_task_queue);
|
||||
running = false;
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
void bt_app_task_start_up(bt_av_hdl_stack_evt_t* handler)
|
||||
{
|
||||
xTaskCreate(bt_app_task_handler, "BtAppT", 4096, handler, configMAX_PRIORITIES - 3, NULL);
|
||||
}
|
||||
|
||||
void bt_app_task_shut_down(void)
|
||||
{
|
||||
running = false;
|
||||
}
|
||||
1064
components/driver_bt/bt_app_source - Copy.c.old
Normal file
File diff suppressed because it is too large
@@ -1,49 +0,0 @@
|
||||
set(srcs
|
||||
"heap_caps.c"
|
||||
"heap_caps_init.c"
|
||||
"multi_heap.c"
|
||||
"heap_tlsf.c")
|
||||
|
||||
if(NOT CONFIG_HEAP_POISONING_DISABLED)
|
||||
list(APPEND srcs "multi_heap_poisoning.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_HEAP_TASK_TRACKING)
|
||||
list(APPEND srcs "heap_task_info.c")
|
||||
endif()
|
||||
|
||||
if(CONFIG_HEAP_TRACING_STANDALONE)
|
||||
list(APPEND srcs "heap_trace_standalone.c")
|
||||
set_source_files_properties(heap_trace_standalone.c
|
||||
PROPERTIES COMPILE_FLAGS
|
||||
-Wno-frame-address)
|
||||
endif()
|
||||
|
||||
idf_component_register(SRCS "${srcs}"
|
||||
INCLUDE_DIRS include
|
||||
LDFRAGMENTS linker.lf
|
||||
PRIV_REQUIRES soc)
|
||||
|
||||
if(CONFIG_HEAP_TRACING)
|
||||
set(WRAP_FUNCTIONS
|
||||
calloc
|
||||
malloc
|
||||
free
|
||||
realloc
|
||||
heap_caps_malloc
|
||||
heap_caps_free
|
||||
heap_caps_realloc
|
||||
heap_caps_malloc_default
|
||||
heap_caps_realloc_default)
|
||||
|
||||
foreach(wrap ${WRAP_FUNCTIONS})
|
||||
target_link_libraries(${COMPONENT_LIB} INTERFACE "-Wl,--wrap=${wrap}")
|
||||
endforeach()
|
||||
endif()
|
||||
|
||||
if(NOT CMAKE_BUILD_EARLY_EXPANSION)
|
||||
idf_build_get_property(build_components BUILD_COMPONENTS)
|
||||
if(freertos IN_LIST build_components)
|
||||
target_compile_options(${COMPONENT_TARGET} PRIVATE "-DMULTI_HEAP_FREERTOS")
|
||||
endif()
|
||||
endif()
|
||||
@@ -1,74 +0,0 @@
|
||||
menu "Heap memory debugging"
|
||||
|
||||
choice HEAP_CORRUPTION_DETECTION
|
||||
prompt "Heap corruption detection"
|
||||
default HEAP_POISONING_DISABLED
|
||||
help
|
||||
Enable heap poisoning features to detect heap corruption caused by out-of-bounds access to heap memory.
|
||||
|
||||
See the "Heap Memory Debugging" page of the IDF documentation
|
||||
for a description of each level of heap corruption detection.
|
||||
|
||||
config HEAP_POISONING_DISABLED
|
||||
bool "Basic (no poisoning)"
|
||||
config HEAP_POISONING_LIGHT
|
||||
bool "Light impact"
|
||||
config HEAP_POISONING_COMPREHENSIVE
|
||||
bool "Comprehensive"
|
||||
endchoice
|
||||
|
||||
choice HEAP_TRACING_DEST
|
||||
bool "Heap tracing"
|
||||
default HEAP_TRACING_OFF
|
||||
help
|
||||
Enables the heap tracing API defined in esp_heap_trace.h.
|
||||
|
||||
This function causes a moderate increase in IRAM code size and a minor increase in heap function
|
||||
(malloc/free/realloc) CPU overhead, even when the tracing feature is not used.
|
||||
So it's best to keep it disabled unless tracing is being used.
|
||||
|
||||
config HEAP_TRACING_OFF
|
||||
bool "Disabled"
|
||||
config HEAP_TRACING_STANDALONE
|
||||
bool "Standalone"
|
||||
select HEAP_TRACING
|
||||
config HEAP_TRACING_TOHOST
|
||||
bool "Host-based"
|
||||
select HEAP_TRACING
|
||||
endchoice
|
||||
|
||||
config HEAP_TRACING
|
||||
bool
|
||||
default n
|
||||
help
|
||||
Enables/disables heap tracing API.
|
||||
|
||||
config HEAP_TRACING_STACK_DEPTH
|
||||
int "Heap tracing stack depth"
|
||||
range 0 0 if IDF_TARGET_ARCH_RISCV # Disabled for RISC-V due to `__builtin_return_address` limitation
|
||||
default 0 if IDF_TARGET_ARCH_RISCV
|
||||
range 0 10
|
||||
default 2
|
||||
depends on HEAP_TRACING
|
||||
help
|
||||
Number of stack frames to save when tracing heap operation callers.
|
||||
|
||||
More stack frames uses more memory in the heap trace buffer (and slows down allocation), but
|
||||
can provide useful information.
|
||||
|
||||
config HEAP_TASK_TRACKING
|
||||
bool "Enable heap task tracking"
|
||||
depends on !HEAP_POISONING_DISABLED
|
||||
help
|
||||
Enables tracking the task responsible for each heap allocation.
|
||||
|
||||
This function depends on heap poisoning being enabled and adds four more bytes of overhead for each block
|
||||
allocated.
|
||||
|
||||
config HEAP_ABORT_WHEN_ALLOCATION_FAILS
|
||||
bool "Abort if memory allocation fails"
|
||||
default n
|
||||
help
|
||||
When enabled, if a memory allocation operation fails it will cause a system abort.
|
||||
|
||||
endmenu
|
||||
@@ -1,32 +0,0 @@
|
||||
#
|
||||
# Component Makefile
|
||||
#
|
||||
|
||||
COMPONENT_OBJS := heap_caps_init.o heap_caps.o multi_heap.o heap_tlsf.o
|
||||
|
||||
ifndef CONFIG_HEAP_POISONING_DISABLED
|
||||
COMPONENT_OBJS += multi_heap_poisoning.o
|
||||
|
||||
ifdef CONFIG_HEAP_TASK_TRACKING
|
||||
COMPONENT_OBJS += heap_task_info.o
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CONFIG_HEAP_TRACING_STANDALONE
|
||||
|
||||
COMPONENT_OBJS += heap_trace_standalone.o
|
||||
|
||||
endif
|
||||
|
||||
ifdef CONFIG_HEAP_TRACING
|
||||
|
||||
WRAP_FUNCTIONS = calloc malloc free realloc heap_caps_malloc heap_caps_free heap_caps_realloc heap_caps_malloc_default heap_caps_realloc_default
|
||||
WRAP_ARGUMENT := -Wl,--wrap=
|
||||
|
||||
COMPONENT_ADD_LDFLAGS = -l$(COMPONENT_NAME) $(addprefix $(WRAP_ARGUMENT),$(WRAP_FUNCTIONS))
|
||||
|
||||
endif
|
||||
|
||||
COMPONENT_ADD_LDFRAGMENTS += linker.lf
|
||||
|
||||
CFLAGS += -DMULTI_HEAP_FREERTOS
|
||||
@@ -1,609 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <stdbool.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/param.h>
|
||||
#include "esp_attr.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "multi_heap.h"
|
||||
#include "esp_log.h"
|
||||
#include "heap_private.h"
|
||||
#include "esp_system.h"
|
||||
|
||||
/*
|
||||
This file, combined with a region allocator that supports multiple heaps, solves the problem that the ESP32 has RAM
|
||||
that's slightly heterogeneous. Some RAM can be byte-accessed, some allows only 32-bit accesses, some can execute memory,
|
||||
some can be remapped by the MMU to only be accessed by a certain PID etc. In order to allow the most flexible memory
|
||||
allocation possible, this code makes it possible to request memory that has certain capabilities. The code will then use
|
||||
its knowledge of how the memory is configured along with a priority scheme to allocate that memory in the most sane way
|
||||
possible. This should optimize the amount of RAM accessible to the code without hardwiring addresses.
|
||||
*/
|
||||
|
||||
static esp_alloc_failed_hook_t alloc_failed_callback;
|
||||
|
||||
/*
|
||||
This takes a memory chunk in a region that can be addressed as both DRAM as well as IRAM. It will convert it to
|
||||
IRAM in such a way that it can be later freed. It assumes both the address as well as the length to be word-aligned.
|
||||
It returns a region that's 1 word smaller than the region given because it stores the original DRAM address there.
|
||||
*/
|
||||
IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
|
||||
{
|
||||
uintptr_t dstart = (uintptr_t)addr; //First word
|
||||
uintptr_t dend = dstart + len - 4; //Last word
|
||||
assert(esp_ptr_in_diram_dram((void *)dstart));
|
||||
assert(esp_ptr_in_diram_dram((void *)dend));
|
||||
assert((dstart & 3) == 0);
|
||||
assert((dend & 3) == 0);
|
||||
#if SOC_DIRAM_INVERTED // We want the word before the result to hold the DRAM address
|
||||
uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dend);
|
||||
#else
|
||||
uint32_t *iptr = esp_ptr_diram_dram_to_iram((void *)dstart);
|
||||
#endif
|
||||
*iptr = dstart;
|
||||
return iptr + 1;
|
||||
}
|
||||
|
||||
|
||||
static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
|
||||
{
|
||||
if (alloc_failed_callback) {
|
||||
alloc_failed_callback(requested_size, caps, function_name);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
|
||||
esp_system_abort("Memory allocation failed");
|
||||
#endif
|
||||
}
|
||||
|
||||
esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback)
|
||||
{
|
||||
if (callback == NULL) {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
|
||||
alloc_failed_callback = callback;
|
||||
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
bool heap_caps_match(const heap_t *heap, uint32_t caps)
|
||||
{
|
||||
return heap->heap != NULL && ((get_all_caps(heap) & caps) == caps);
|
||||
}
|
||||
|
||||
/*
|
||||
Routine to allocate a bit of memory with certain capabilities. caps is a bitfield of MALLOC_CAP_* bits.
|
||||
*/
|
||||
IRAM_ATTR void *heap_caps_malloc( size_t size, uint32_t caps )
|
||||
{
|
||||
void *ret = NULL;
|
||||
|
||||
if (size > HEAP_SIZE_MAX) {
|
||||
// Avoids int overflow when adding small numbers to size, or
|
||||
// calculating 'end' from start+size, by limiting 'size' to the possible range
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (caps & MALLOC_CAP_EXEC) {
|
||||
//MALLOC_CAP_EXEC forces an alloc from IRAM. There is a region which has both this as well as the following
|
||||
//caps, but the following caps are not possible for IRAM. Thus, the combination is impossible and we return
|
||||
//NULL directly, even though our heap capabilities (based on soc_memory_tags & soc_memory_regions) would
|
||||
//indicate there is a tag for this.
|
||||
if ((caps & MALLOC_CAP_8BIT) || (caps & MALLOC_CAP_DMA)) {
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
caps |= MALLOC_CAP_32BIT; // IRAM is 32-bit accessible RAM
|
||||
}
|
||||
|
||||
if (caps & MALLOC_CAP_32BIT) {
|
||||
/* 32-bit accessible RAM should be allocated in 4-byte aligned sizes
|
||||
* (Future versions of ESP-IDF should possibly fail if an invalid size is requested)
|
||||
*/
|
||||
size = (size + 3) & (~3); // int overflow checked above
|
||||
}
|
||||
|
||||
for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
|
||||
//Iterate over heaps and check capabilities at this priority
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap->heap == NULL) {
|
||||
continue;
|
||||
}
|
||||
if ((heap->caps[prio] & caps) != 0) {
|
||||
//Heap has at least one of the caps requested. If caps has other bits set that this prio
|
||||
//doesn't cover, see if they're available in other prios.
|
||||
if ((get_all_caps(heap) & caps) == caps) {
|
||||
//This heap can satisfy all the requested capabilities. See if we can grab some memory using it.
|
||||
if ((caps & MALLOC_CAP_EXEC) && esp_ptr_in_diram_dram((void *)heap->start)) {
|
||||
//This is special, insofar that what we're going to get back is a DRAM address. If so,
|
||||
//we need to 'invert' it (lowest address in DRAM == highest address in IRAM and vice-versa) and
|
||||
//add a pointer to the DRAM equivalent before the address we're going to return.
|
||||
ret = multi_heap_malloc(heap->heap, size + 4); // int overflow checked above
|
||||
|
||||
if (ret != NULL) {
|
||||
return dram_alloc_to_iram_addr(ret, size + 4); // int overflow checked above
|
||||
}
|
||||
} else {
|
||||
//Just try to alloc, nothing special.
|
||||
ret = multi_heap_malloc(heap->heap, size);
|
||||
if (ret != NULL) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
//Nothing usable found.
|
||||
return NULL;
|
||||
}
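For example, a typical capability-based request (a sketch; the size and error handling are illustrative):

// Ask for a 4 KiB buffer that the DMA engine can reach (internal, byte-accessible RAM).
uint8_t *dma_buf = heap_caps_malloc(4096, MALLOC_CAP_DMA | MALLOC_CAP_8BIT);
if (dma_buf == NULL) {
    // the registered alloc-failed callback (if any) has already been invoked at this point
}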
|
||||
|
||||
|
||||
#define MALLOC_DISABLE_EXTERNAL_ALLOCS -1
|
||||
//Dual-use: -1 (=MALLOC_DISABLE_EXTERNAL_ALLOCS) disables allocations in external memory, >=0 sets the limit for allocations preferring internal memory.
|
||||
static int malloc_alwaysinternal_limit=MALLOC_DISABLE_EXTERNAL_ALLOCS;
|
||||
|
||||
void heap_caps_malloc_extmem_enable(size_t limit)
|
||||
{
|
||||
malloc_alwaysinternal_limit=limit;
|
||||
}
|
||||
|
||||
/*
|
||||
Default memory allocation implementation. Should return standard 8-bit memory. malloc() essentially resolves to this function.
|
||||
*/
|
||||
IRAM_ATTR void *heap_caps_malloc_default( size_t size )
|
||||
{
|
||||
if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
|
||||
return heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL);
|
||||
} else {
|
||||
void *r;
|
||||
if (size <= (size_t)malloc_alwaysinternal_limit) {
|
||||
r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
|
||||
} else {
|
||||
r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
|
||||
}
|
||||
if (r==NULL) {
|
||||
//try again while being less picky
|
||||
r=heap_caps_malloc( size, MALLOC_CAP_DEFAULT );
|
||||
}
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Same for realloc()
|
||||
Note: keep the logic in here the same as in heap_caps_malloc_default (or merge the two as soon as this gets more complex...)
|
||||
*/
|
||||
IRAM_ATTR void *heap_caps_realloc_default( void *ptr, size_t size )
|
||||
{
|
||||
if (malloc_alwaysinternal_limit==MALLOC_DISABLE_EXTERNAL_ALLOCS) {
|
||||
return heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
|
||||
} else {
|
||||
void *r;
|
||||
if (size <= (size_t)malloc_alwaysinternal_limit) {
|
||||
r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_INTERNAL );
|
||||
} else {
|
||||
r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT | MALLOC_CAP_SPIRAM );
|
||||
}
|
||||
if (r==NULL && size>0) {
|
||||
//We needed to allocate memory, but we didn't. Try again while being less picky.
|
||||
r=heap_caps_realloc( ptr, size, MALLOC_CAP_DEFAULT );
|
||||
}
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Memory allocation as preference in decreasing order.
|
||||
*/
|
||||
IRAM_ATTR void *heap_caps_malloc_prefer( size_t size, size_t num, ... )
|
||||
{
|
||||
va_list argp;
|
||||
va_start( argp, num );
|
||||
void *r = NULL;
|
||||
while (num--) {
|
||||
uint32_t caps = va_arg( argp, uint32_t );
|
||||
r = heap_caps_malloc( size, caps );
|
||||
if (r != NULL) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
va_end( argp );
|
||||
return r;
|
||||
}
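A sketch of the preference list in use, assuming a board with PSRAM; the buffer size is illustrative.

// Try external PSRAM first, then fall back to any default (internal) memory.
void *frame = heap_caps_malloc_prefer(32 * 1024, 2,
                                      MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT,
                                      MALLOC_CAP_DEFAULT);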
|
||||
|
||||
/*
|
||||
Memory reallocation as preference in decreasing order.
|
||||
*/
|
||||
IRAM_ATTR void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... )
|
||||
{
|
||||
va_list argp;
|
||||
va_start( argp, num );
|
||||
void *r = NULL;
|
||||
while (num--) {
|
||||
uint32_t caps = va_arg( argp, uint32_t );
|
||||
r = heap_caps_realloc( ptr, size, caps );
|
||||
if (r != NULL || size == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
va_end( argp );
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
Memory calloc() allocation as preference in decreasing order.
|
||||
*/
|
||||
IRAM_ATTR void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... )
|
||||
{
|
||||
va_list argp;
|
||||
va_start( argp, num );
|
||||
void *r = NULL;
|
||||
while (num--) {
|
||||
uint32_t caps = va_arg( argp, uint32_t );
|
||||
r = heap_caps_calloc( n, size, caps );
|
||||
if (r != NULL) break;
|
||||
}
|
||||
va_end( argp );
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Find the heap which belongs to ptr, or return NULL if it's
|
||||
not in any heap.
|
||||
|
||||
(This confirms if ptr is inside the heap's region, doesn't confirm if 'ptr'
|
||||
is an allocated block or is some other random address inside the heap.)
|
||||
*/
|
||||
IRAM_ATTR static heap_t *find_containing_heap(void *ptr )
|
||||
{
|
||||
intptr_t p = (intptr_t)ptr;
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap->heap != NULL && p >= heap->start && p < heap->end) {
|
||||
return heap;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
IRAM_ATTR void heap_caps_free( void *ptr)
|
||||
{
|
||||
if (ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (esp_ptr_in_diram_iram(ptr)) {
|
||||
//Memory allocated here is actually allocated in the DRAM alias region and
|
||||
//cannot be de-allocated as usual. dram_alloc_to_iram_addr stores a pointer to
|
||||
//the equivalent DRAM address, though; free that.
|
||||
uint32_t *dramAddrPtr = (uint32_t *)ptr;
|
||||
ptr = (void *)dramAddrPtr[-1];
|
||||
}
|
||||
|
||||
heap_t *heap = find_containing_heap(ptr);
|
||||
assert(heap != NULL && "free() target pointer is outside heap areas");
|
||||
multi_heap_free(heap->heap, ptr);
|
||||
}
|
||||
|
||||
IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps)
|
||||
{
|
||||
bool ptr_in_diram_case = false;
|
||||
heap_t *heap = NULL;
|
||||
void *dram_ptr = NULL;
|
||||
|
||||
if (ptr == NULL) {
|
||||
return heap_caps_malloc(size, caps);
|
||||
}
|
||||
|
||||
if (size == 0) {
|
||||
heap_caps_free(ptr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (size > HEAP_SIZE_MAX) {
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
//The pointer to memory may be aliased, we need to
|
||||
//recover the corresponding address before managing a new allocation:
|
||||
if(esp_ptr_in_diram_iram((void *)ptr)) {
|
||||
uint32_t *dram_addr = (uint32_t *)ptr;
|
||||
dram_ptr = (void *)dram_addr[-1];
|
||||
|
||||
heap = find_containing_heap(dram_ptr);
|
||||
assert(heap != NULL && "realloc() pointer is outside heap areas");
|
||||
|
||||
//with pointers that reside on diram space, we avoid using
|
||||
//the realloc implementation due to address translation issues,
|
||||
//instead force a malloc/copy/free
|
||||
ptr_in_diram_case = true;
|
||||
|
||||
} else {
|
||||
heap = find_containing_heap(ptr);
|
||||
assert(heap != NULL && "realloc() pointer is outside heap areas");
|
||||
}
|
||||
|
||||
// are the existing heap's capabilities compatible with the
|
||||
// requested ones?
|
||||
bool compatible_caps = (caps & get_all_caps(heap)) == caps;
|
||||
|
||||
if (compatible_caps && !ptr_in_diram_case) {
|
||||
// try to reallocate this memory within the same heap
|
||||
// (which will resize the block if it can)
|
||||
void *r = multi_heap_realloc(heap->heap, ptr, size);
|
||||
if (r != NULL) {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
// if we couldn't do that, try to see if we can reallocate
|
||||
// in a different heap with requested capabilities.
|
||||
void *new_p = heap_caps_malloc(size, caps);
|
||||
if (new_p != NULL) {
|
||||
size_t old_size = 0;
|
||||
|
||||
//If we're dealing with aliased ptr, information regarding its containing
|
||||
//heap can only be obtained with translated address.
|
||||
if(ptr_in_diram_case) {
|
||||
old_size = multi_heap_get_allocated_size(heap->heap, dram_ptr);
|
||||
} else {
|
||||
old_size = multi_heap_get_allocated_size(heap->heap, ptr);
|
||||
}
|
||||
|
||||
assert(old_size > 0);
|
||||
memcpy(new_p, ptr, MIN(size, old_size));
|
||||
heap_caps_free(ptr);
|
||||
return new_p;
|
||||
}
|
||||
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
IRAM_ATTR void *heap_caps_calloc( size_t n, size_t size, uint32_t caps)
|
||||
{
|
||||
void *result;
|
||||
size_t size_bytes;
|
||||
|
||||
if (__builtin_mul_overflow(n, size, &size_bytes)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
result = heap_caps_malloc(size_bytes, caps);
|
||||
if (result != NULL) {
|
||||
bzero(result, size_bytes);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
size_t heap_caps_get_total_size(uint32_t caps)
|
||||
{
|
||||
size_t total_size = 0;
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap_caps_match(heap, caps)) {
|
||||
total_size += (heap->end - heap->start);
|
||||
}
|
||||
}
|
||||
return total_size;
|
||||
}
|
||||
|
||||
size_t heap_caps_get_free_size( uint32_t caps )
|
||||
{
|
||||
size_t ret = 0;
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap_caps_match(heap, caps)) {
|
||||
ret += multi_heap_free_size(heap->heap);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t heap_caps_get_minimum_free_size( uint32_t caps )
|
||||
{
|
||||
size_t ret = 0;
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap_caps_match(heap, caps)) {
|
||||
ret += multi_heap_minimum_free_size(heap->heap);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t heap_caps_get_largest_free_block( uint32_t caps )
|
||||
{
|
||||
multi_heap_info_t info;
|
||||
heap_caps_get_info(&info, caps);
|
||||
return info.largest_free_block;
|
||||
}
|
||||
|
||||
void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps )
|
||||
{
|
||||
bzero(info, sizeof(multi_heap_info_t));
|
||||
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap_caps_match(heap, caps)) {
|
||||
multi_heap_info_t hinfo;
|
||||
multi_heap_get_info(heap->heap, &hinfo);
|
||||
|
||||
info->total_free_bytes += hinfo.total_free_bytes;
|
||||
info->total_allocated_bytes += hinfo.total_allocated_bytes;
|
||||
info->largest_free_block = MAX(info->largest_free_block,
|
||||
hinfo.largest_free_block);
|
||||
info->minimum_free_bytes += hinfo.minimum_free_bytes;
|
||||
info->allocated_blocks += hinfo.allocated_blocks;
|
||||
info->free_blocks += hinfo.free_blocks;
|
||||
info->total_blocks += hinfo.total_blocks;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void heap_caps_print_heap_info( uint32_t caps )
|
||||
{
|
||||
multi_heap_info_t info;
|
||||
printf("Heap summary for capabilities 0x%08X:\n", caps);
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap_caps_match(heap, caps)) {
|
||||
multi_heap_get_info(heap->heap, &info);
|
||||
|
||||
printf(" At 0x%08x len %d free %d allocated %d min_free %d\n",
|
||||
heap->start, heap->end - heap->start, info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes);
|
||||
printf(" largest_free_block %d alloc_blocks %d free_blocks %d total_blocks %d\n",
|
||||
info.largest_free_block, info.allocated_blocks,
|
||||
info.free_blocks, info.total_blocks);
|
||||
}
|
||||
}
|
||||
printf(" Totals:\n");
|
||||
heap_caps_get_info(&info, caps);
|
||||
|
||||
printf(" free %d allocated %d min_free %d largest_free_block %d\n", info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes, info.largest_free_block);
|
||||
}
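As a usage sketch, the same counters can also be queried programmatically instead of printed:

multi_heap_info_t info;
heap_caps_get_info(&info, MALLOC_CAP_INTERNAL);
printf("internal: %u bytes free, largest free block %u bytes, low watermark %u bytes\n",
       (unsigned) info.total_free_bytes,
       (unsigned) info.largest_free_block,
       (unsigned) info.minimum_free_bytes);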
|
||||
|
||||
bool heap_caps_check_integrity(uint32_t caps, bool print_errors)
|
||||
{
|
||||
bool all_heaps = caps & MALLOC_CAP_INVALID;
|
||||
bool valid = true;
|
||||
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap->heap != NULL
|
||||
&& (all_heaps || (get_all_caps(heap) & caps) == caps)) {
|
||||
valid = multi_heap_check(heap->heap, print_errors) && valid;
|
||||
}
|
||||
}
|
||||
|
||||
return valid;
|
||||
}
|
||||
|
||||
bool heap_caps_check_integrity_all(bool print_errors)
|
||||
{
|
||||
return heap_caps_check_integrity(MALLOC_CAP_INVALID, print_errors);
|
||||
}
|
||||
|
||||
bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors)
|
||||
{
|
||||
heap_t *heap = find_containing_heap((void *)addr);
|
||||
if (heap == NULL) {
|
||||
return false;
|
||||
}
|
||||
return multi_heap_check(heap->heap, print_errors);
|
||||
}
|
||||
|
||||
void heap_caps_dump(uint32_t caps)
|
||||
{
|
||||
bool all_heaps = caps & MALLOC_CAP_INVALID;
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap->heap != NULL
|
||||
&& (all_heaps || (get_all_caps(heap) & caps) == caps)) {
|
||||
multi_heap_dump(heap->heap);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void heap_caps_dump_all(void)
|
||||
{
|
||||
heap_caps_dump(MALLOC_CAP_INVALID);
|
||||
}
|
||||
|
||||
size_t heap_caps_get_allocated_size( void *ptr )
|
||||
{
|
||||
heap_t *heap = find_containing_heap(ptr);
|
||||
size_t size = multi_heap_get_allocated_size(heap->heap, ptr);
|
||||
return size;
|
||||
}
|
||||
|
||||
IRAM_ATTR void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps)
|
||||
{
|
||||
void *ret = NULL;
|
||||
|
||||
if(!alignment) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
//Alignment must be a power of two:
|
||||
if((alignment & (alignment - 1)) != 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (size > HEAP_SIZE_MAX) {
|
||||
// Avoids int overflow when adding small numbers to size, or
|
||||
// calculating 'end' from start+size, by limiting 'size' to the possible range
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
|
||||
//Iterate over heaps and check capabilities at this priority
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if (heap->heap == NULL) {
|
||||
continue;
|
||||
}
|
||||
if ((heap->caps[prio] & caps) != 0) {
|
||||
//Heap has at least one of the caps requested. If caps has other bits set that this prio
|
||||
//doesn't cover, see if they're available in other prios.
|
||||
if ((get_all_caps(heap) & caps) == caps) {
|
||||
//Just try to alloc, nothing special.
|
||||
ret = multi_heap_aligned_alloc(heap->heap, size, alignment);
|
||||
if (ret != NULL) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
heap_caps_alloc_failed(size, caps, __func__);
|
||||
|
||||
//Nothing usable found.
|
||||
return NULL;
|
||||
}
|
||||
|
||||
IRAM_ATTR void heap_caps_aligned_free(void *ptr)
|
||||
{
|
||||
heap_caps_free(ptr);
|
||||
}
|
||||
|
||||
void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps)
|
||||
{
|
||||
size_t size_bytes;
|
||||
if (__builtin_mul_overflow(n, size, &size_bytes)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *ptr = heap_caps_aligned_alloc(alignment,size_bytes, caps);
|
||||
if(ptr != NULL) {
|
||||
memset(ptr, 0, size_bytes);
|
||||
}
|
||||
|
||||
return ptr;
|
||||
}
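A brief sketch of the aligned allocators above; the alignment, element count and capability are illustrative.

// 16 zero-initialised descriptors on a 64-byte boundary, reachable by DMA.
uint32_t *desc = heap_caps_aligned_calloc(64, 16, sizeof(uint32_t), MALLOC_CAP_DMA);
if (desc != NULL) {
    // ... use the descriptors ...
    heap_caps_aligned_free(desc);   // currently just forwards to heap_caps_free()
}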
|
||||
@@ -1,241 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include "heap_private.h"
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <sys/lock.h>
|
||||
|
||||
#include "esp_log.h"
|
||||
#include "multi_heap.h"
|
||||
#include "multi_heap_platform.h"
|
||||
#include "esp_heap_caps_init.h"
|
||||
#include "soc/soc_memory_layout.h"
|
||||
|
||||
static const char *TAG = "heap_init";
|
||||
|
||||
/* Linked-list of registered heaps */
|
||||
struct registered_heap_ll registered_heaps;
|
||||
|
||||
static void register_heap(heap_t *region)
|
||||
{
|
||||
size_t heap_size = region->end - region->start;
|
||||
assert(heap_size <= HEAP_SIZE_MAX);
|
||||
region->heap = multi_heap_register((void *)region->start, heap_size);
|
||||
if (region->heap != NULL) {
|
||||
ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
|
||||
}
|
||||
}
|
||||
|
||||
void heap_caps_enable_nonos_stack_heaps(void)
|
||||
{
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
// Assume any not-yet-registered heap is
|
||||
// a nonos-stack heap
|
||||
if (heap->heap == NULL) {
|
||||
register_heap(heap);
|
||||
if (heap->heap != NULL) {
|
||||
multi_heap_set_lock(heap->heap, &heap->heap_mux);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Initialize the heap allocator to use all of the memory not
|
||||
used by static data or reserved for other purposes
|
||||
*/
|
||||
void heap_caps_init(void)
|
||||
{
|
||||
/* Get the array of regions that we can use for heaps
|
||||
(with reserved memory removed already.)
|
||||
*/
|
||||
size_t num_regions = soc_get_available_memory_region_max_count();
|
||||
soc_memory_region_t regions[num_regions];
|
||||
num_regions = soc_get_available_memory_regions(regions);
|
||||
|
||||
//The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
|
||||
//it's useful to coalesce adjacent regions that have the same type.
|
||||
for (size_t i = 1; i < num_regions; i++) {
|
||||
soc_memory_region_t *a = ®ions[i - 1];
|
||||
soc_memory_region_t *b = ®ions[i];
|
||||
if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type ) {
|
||||
a->type = -1;
|
||||
b->start = a->start;
|
||||
b->size += a->size;
|
||||
}
|
||||
}
|
||||
|
||||
/* Count the heaps left after merging */
|
||||
size_t num_heaps = 0;
|
||||
for (size_t i = 0; i < num_regions; i++) {
|
||||
if (regions[i].type != -1) {
|
||||
num_heaps++;
|
||||
}
|
||||
}
|
||||
|
||||
/* Start by allocating the registered heap data on the stack.
|
||||
|
||||
Once we have a heap to copy it to, we will copy it to a heap buffer.
|
||||
*/
|
||||
heap_t temp_heaps[num_heaps];
|
||||
size_t heap_idx = 0;
|
||||
|
||||
ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
|
||||
for (size_t i = 0; i < num_regions; i++) {
|
||||
soc_memory_region_t *region = ®ions[i];
|
||||
const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
|
||||
heap_t *heap = &temp_heaps[heap_idx];
|
||||
if (region->type == -1) {
|
||||
continue;
|
||||
}
|
||||
heap_idx++;
|
||||
assert(heap_idx <= num_heaps);
|
||||
|
||||
memcpy(heap->caps, type->caps, sizeof(heap->caps));
|
||||
heap->start = region->start;
|
||||
heap->end = region->start + region->size;
|
||||
MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
|
||||
if (type->startup_stack) {
|
||||
/* Will be registered when OS scheduler starts */
|
||||
heap->heap = NULL;
|
||||
} else {
|
||||
register_heap(heap);
|
||||
}
|
||||
SLIST_NEXT(heap, next) = NULL;
|
||||
|
||||
ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
|
||||
region->start, region->size, region->size / 1024, type->name);
|
||||
}
|
||||
|
||||
assert(heap_idx == num_heaps);
|
||||
|
||||
/* Allocate the permanent heap data that we'll use as a linked list at runtime.
|
||||
|
||||
Allocate this part of data contiguously, even though it's a linked list... */
|
||||
assert(SLIST_EMPTY(®istered_heaps));
|
||||
|
||||
heap_t *heaps_array = NULL;
|
||||
for (size_t i = 0; i < num_heaps; i++) {
|
||||
if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
|
||||
/* use the first DRAM heap which can fit the data */
|
||||
heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
|
||||
if (heaps_array != NULL) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */
|
||||
|
||||
memcpy(heaps_array, temp_heaps, sizeof(heap_t)*num_heaps);
|
||||
|
||||
/* Iterate the heaps and set their locks, also add them to the linked list. */
|
||||
for (size_t i = 0; i < num_heaps; i++) {
|
||||
if (heaps_array[i].heap != NULL) {
|
||||
multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
|
||||
}
|
||||
if (i == 0) {
|
||||
SLIST_INSERT_HEAD(®istered_heaps, &heaps_array[0], next);
|
||||
} else {
|
||||
SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
|
||||
{
|
||||
if (start == 0) {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < soc_memory_region_count; i++) {
|
||||
const soc_memory_region_t *region = &soc_memory_regions[i];
|
||||
// Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
|
||||
if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
|
||||
const uint32_t *caps = soc_memory_types[region->type].caps;
|
||||
return heap_caps_add_region_with_caps(caps, start, end);
|
||||
}
|
||||
}
|
||||
|
||||
return ESP_ERR_NOT_FOUND;
|
||||
}
|
||||
|
||||
esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
|
||||
{
|
||||
esp_err_t err = ESP_FAIL;
|
||||
if (caps == NULL || start == 0 || end == 0 || end <= start) {
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
|
||||
//Check if region overlaps the start and/or end of an existing region. If so, the
|
||||
//region is invalid (or maybe added twice)
|
||||
/*
|
||||
* assume that in one region, start must be less than end (they cannot be equal) !!
|
||||
* Specially, the 4th scenario can be allowed. For example, allocate memory from heap,
|
||||
* then change the capability and call this function to create a new region for special
|
||||
* application.
|
||||
* In the following chart, 'start = start' and 'end = end' is contained in 3rd scenario.
|
||||
* This all-equal scenario is incorrect because the same region cannot be added twice. For example,
|
||||
* add the .bss memory to region twice, if not do the check, it will cause exception.
|
||||
*
|
||||
* the existing heap region s(tart) e(nd)
|
||||
* |----------------------|
|
||||
* 1.add region [Correct] (s1<s && e1<=s) |-----|
|
||||
* 2.add region [Incorrect] (s2<=s && s<e2<=e) |---------------|
|
||||
* 3.add region [Incorrect] (s3<=s && e<e3) |-------------------------------------|
|
||||
* 4 add region [Correct] (s<s4<e && s<e4<=e) |-------|
|
||||
* 5.add region [Incorrect] (s<s5<e && e<e5) |----------------------------|
|
||||
* 6.add region [Correct] (e<=s6 && e<e6) |----|
|
||||
*/
|
||||
|
||||
heap_t *heap;
|
||||
SLIST_FOREACH(heap, ®istered_heaps, next) {
|
||||
if ((start <= heap->start && end > heap->start)
|
||||
|| (start < heap->end && end > heap->end)) {
|
||||
return ESP_FAIL;
|
||||
}
|
||||
}
|
||||
|
||||
heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
|
||||
if (p_new == NULL) {
|
||||
err = ESP_ERR_NO_MEM;
|
||||
goto done;
|
||||
}
|
||||
memcpy(p_new->caps, caps, sizeof(p_new->caps));
|
||||
p_new->start = start;
|
||||
p_new->end = end;
|
||||
MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
|
||||
p_new->heap = multi_heap_register((void *)start, end - start);
|
||||
SLIST_NEXT(p_new, next) = NULL;
|
||||
if (p_new->heap == NULL) {
|
||||
err = ESP_ERR_INVALID_SIZE;
|
||||
goto done;
|
||||
}
|
||||
multi_heap_set_lock(p_new->heap, &p_new->heap_mux);
|
||||
|
||||
/* (This insertion is atomic to registered_heaps, so
|
||||
we don't need to worry about thread safety for readers,
|
||||
only for writers.) */
|
||||
static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
|
||||
MULTI_HEAP_LOCK(®istered_heaps_write_lock);
|
||||
SLIST_INSERT_HEAD(®istered_heaps, p_new, next);
|
||||
MULTI_HEAP_UNLOCK(®istered_heaps_write_lock);
|
||||
|
||||
err = ESP_OK;
|
||||
|
||||
done:
|
||||
if (err != ESP_OK) {
|
||||
free(p_new);
|
||||
}
|
||||
return err;
|
||||
}
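A hedged sketch of handing the allocator a new region at runtime. The address window is hypothetical; it must lie inside a known soc memory region and must not overlap any registered heap.

esp_err_t err = heap_caps_add_region((intptr_t)0x3F806000, (intptr_t)0x3F810000);
if (err != ESP_OK) {
    // ESP_ERR_INVALID_ARG, ESP_ERR_NOT_FOUND, ESP_FAIL (overlap),
    // ESP_ERR_NO_MEM or ESP_ERR_INVALID_SIZE, as returned by the functions above
}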
|
||||
@@ -1,77 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <soc/soc_memory_layout.h>
|
||||
#include "multi_heap.h"
|
||||
#include "multi_heap_platform.h"
|
||||
#include "sys/queue.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* Some common heap registration data structures used
|
||||
for heap_caps_init.c to share heap information with heap_caps.c
|
||||
*/
|
||||
|
||||
#define HEAP_SIZE_MAX (SOC_MAX_CONTIGUOUS_RAM_SIZE)
|
||||
|
||||
/* Type for describing each registered heap */
|
||||
typedef struct heap_t_ {
|
||||
uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS]; ///< Capabilities for the type of memory in this heap (as a prioritised set). Copied from soc_memory_types so it's in RAM not flash.
|
||||
intptr_t start;
|
||||
intptr_t end;
|
||||
multi_heap_lock_t heap_mux;
|
||||
multi_heap_handle_t heap;
|
||||
SLIST_ENTRY(heap_t_) next;
|
||||
} heap_t;
|
||||
|
||||
/* All registered heaps.
|
||||
|
||||
Forms a single linked list, even though most entries are contiguous.
|
||||
This means at the expense of 4 bytes per heap, new heaps can be
|
||||
added at runtime in a fast & thread-safe way.
|
||||
*/
|
||||
extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;
|
||||
|
||||
bool heap_caps_match(const heap_t *heap, uint32_t caps);
|
||||
|
||||
/* return all possible capabilities (across all priorities) for a given heap */
|
||||
inline static IRAM_ATTR uint32_t get_all_caps(const heap_t *heap)
|
||||
{
|
||||
if (heap->heap == NULL) {
|
||||
return 0;
|
||||
}
|
||||
uint32_t all_caps = 0;
|
||||
for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
|
||||
all_caps |= heap->caps[prio];
|
||||
}
|
||||
return all_caps;
|
||||
}
|
||||
|
||||
/*
|
||||
Because we don't want to add _another_ known allocation method to the stack of functions to trace wrt memory tracing,
|
||||
these are declared private. The newlib malloc()/realloc() implementation also calls these, so they are declared
|
||||
separately in newlib/syscalls.c.
|
||||
*/
|
||||
void *heap_caps_realloc_default(void *p, size_t size);
|
||||
void *heap_caps_malloc_default(size_t size);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,129 +0,0 @@
|
||||
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <freertos/FreeRTOS.h>
|
||||
#include <freertos/task.h>
|
||||
#include <multi_heap.h>
|
||||
#include "multi_heap_internal.h"
|
||||
#include "heap_private.h"
|
||||
#include "esp_heap_task_info.h"
|
||||
|
||||
#ifdef CONFIG_HEAP_TASK_TRACKING
|
||||
|
||||
/*
|
||||
* Return per-task heap allocation totals and lists of blocks.
|
||||
*
|
||||
* For each task that has allocated memory from the heap, return totals for
|
||||
* allocations within regions matching one or more sets of capabilities.
|
||||
*
|
||||
* Optionally also return an array of structs providing details about each
|
||||
* block allocated by one or more requested tasks, or by all tasks.
|
||||
*
|
||||
* Returns the number of block detail structs returned.
|
||||
*/
|
||||
size_t heap_caps_get_per_task_info(heap_task_info_params_t *params)
|
||||
{
|
||||
heap_t *reg;
|
||||
heap_task_block_t *blocks = params->blocks;
|
||||
size_t count = *params->num_totals;
|
||||
size_t remaining = params->max_blocks;
|
||||
|
||||
// Clear out totals for any prepopulated tasks.
|
||||
if (params->totals) {
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
for (size_t type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
|
||||
params->totals[i].size[type] = 0;
|
||||
params->totals[i].count[type] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SLIST_FOREACH(reg, ®istered_heaps, next) {
|
||||
multi_heap_handle_t heap = reg->heap;
|
||||
if (heap == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Find if the capabilities of this heap region match one of the desired
|
||||
// sets of capabilities.
|
||||
uint32_t caps = get_all_caps(reg);
|
||||
uint32_t type;
|
||||
for (type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
|
||||
if ((caps & params->mask[type]) == params->caps[type]) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (type == NUM_HEAP_TASK_CAPS) {
|
||||
continue;
|
||||
}
|
||||
|
||||
multi_heap_block_handle_t b = multi_heap_get_first_block(heap);
|
||||
multi_heap_internal_lock(heap);
|
||||
for ( ; b ; b = multi_heap_get_next_block(heap, b)) {
|
||||
if (multi_heap_is_free(b)) {
|
||||
continue;
|
||||
}
|
||||
void *p = multi_heap_get_block_address(b); // Safe, only arithmetic
|
||||
size_t bsize = multi_heap_get_allocated_size(heap, p); // Validates
|
||||
TaskHandle_t btask = (TaskHandle_t)multi_heap_get_block_owner(b);
|
||||
|
||||
// Accumulate per-task allocation totals.
|
||||
if (params->totals) {
|
||||
size_t i;
|
||||
for (i = 0; i < count; ++i) {
|
||||
if (params->totals[i].task == btask) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i < count) {
|
||||
params->totals[i].size[type] += bsize;
|
||||
params->totals[i].count[type] += 1;
|
||||
}
|
||||
else {
|
||||
if (count < params->max_totals) {
|
||||
params->totals[count].task = btask;
|
||||
params->totals[count].size[type] = bsize;
|
||||
params->totals[count].count[type] = 1;   // 'i' equals 'count' here; use 'count' for clarity
|
||||
++count;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return details about allocated blocks for selected tasks.
|
||||
if (blocks && remaining > 0) {
|
||||
if (params->tasks) {
|
||||
size_t i;
|
||||
for (i = 0; i < params->num_tasks; ++i) {
|
||||
if (btask == params->tasks[i]) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i == params->num_tasks) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
blocks->task = btask;
|
||||
blocks->address = p;
|
||||
blocks->size = bsize;
|
||||
++blocks;
|
||||
--remaining;
|
||||
}
|
||||
}
|
||||
multi_heap_internal_unlock(heap);
|
||||
}
|
||||
*params->num_totals = count;
|
||||
return params->max_blocks - remaining;
|
||||
}
|
||||
|
||||
#endif // CONFIG_HEAP_TASK_TRACKING
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,119 +0,0 @@
|
||||
/*
|
||||
** Two Level Segregated Fit memory allocator, version 3.1.
|
||||
** Written by Matthew Conte
|
||||
** http://tlsf.baisoku.org
|
||||
**
|
||||
** Based on the original documentation by Miguel Masmano:
|
||||
** http://www.gii.upv.es/tlsf/main/docs
|
||||
**
|
||||
** This implementation was written to the specification
|
||||
** of the document, therefore no GPL restrictions apply.
|
||||
**
|
||||
** Copyright (c) 2006-2016, Matthew Conte
|
||||
** All rights reserved.
|
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without
|
||||
** modification, are permitted provided that the following conditions are met:
|
||||
** * Redistributions of source code must retain the above copyright
|
||||
** notice, this list of conditions and the following disclaimer.
|
||||
** * Redistributions in binary form must reproduce the above copyright
|
||||
** notice, this list of conditions and the following disclaimer in the
|
||||
** documentation and/or other materials provided with the distribution.
|
||||
** * Neither the name of the copyright holder nor the
|
||||
** names of its contributors may be used to endorse or promote products
|
||||
** derived from this software without specific prior written permission.
|
||||
**
|
||||
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
|
||||
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include <assert.h>
|
||||
#include <limits.h>
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stddef.h>
|
||||
#include "heap_tlsf_config.h"
|
||||
|
||||
#if defined(__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
** Cast and min/max macros.
|
||||
*/
|
||||
#define tlsf_cast(t, exp) ((t) (exp))
|
||||
#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
|
||||
#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))
|
||||
|
||||
/* A type used for casting when doing pointer arithmetic. */
|
||||
typedef ptrdiff_t tlsfptr_t;
|
||||
|
||||
typedef struct block_header_t
|
||||
{
|
||||
/* Points to the previous physical block. */
|
||||
struct block_header_t* prev_phys_block;
|
||||
|
||||
/* The size of this block, excluding the block header. */
|
||||
size_t size;
|
||||
|
||||
/* Next and previous free blocks. */
|
||||
struct block_header_t* next_free;
|
||||
struct block_header_t* prev_free;
|
||||
} block_header_t;
|
||||
|
||||
#include "heap_tlsf_block_functions.h"
|
||||
|
||||
/* tlsf_t: a TLSF structure. Can contain 1 to N pools. */
|
||||
/* pool_t: a block of memory that TLSF can manage. */
|
||||
typedef void* tlsf_t;
|
||||
typedef void* pool_t;
|
||||
|
||||
/* Create/destroy a memory pool. */
|
||||
tlsf_t tlsf_create(void* mem, size_t max_bytes);
|
||||
tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes);
|
||||
pool_t tlsf_get_pool(tlsf_t tlsf);
|
||||
|
||||
/* Add/remove memory pools. */
|
||||
pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes);
|
||||
void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
|
||||
|
||||
/* malloc/memalign/realloc/free replacements. */
|
||||
void* tlsf_malloc(tlsf_t tlsf, size_t size);
|
||||
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size);
|
||||
void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t offset);
|
||||
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
|
||||
void tlsf_free(tlsf_t tlsf, void* ptr);
|
||||
|
||||
/* Returns internal block size, not original request size */
|
||||
size_t tlsf_block_size(void* ptr);
|
||||
|
||||
/* Overheads/limits of internal structures. */
|
||||
size_t tlsf_size(tlsf_t tlsf);
|
||||
size_t tlsf_align_size(void);
|
||||
size_t tlsf_block_size_min(void);
|
||||
size_t tlsf_block_size_max(tlsf_t tlsf);
|
||||
size_t tlsf_pool_overhead(void);
|
||||
size_t tlsf_alloc_overhead(void);
|
||||
size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
|
||||
|
||||
/* Debugging. */
|
||||
typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
|
||||
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);
|
||||
/* Returns nonzero if any internal consistency check fails. */
|
||||
int tlsf_check(tlsf_t tlsf);
|
||||
int tlsf_check_pool(pool_t pool);
|
||||
|
||||
#if defined(__cplusplus)
|
||||
};
|
||||
#endif
|
||||
@@ -1,174 +0,0 @@
|
||||
/*
|
||||
** Two Level Segregated Fit memory allocator, version 3.1.
|
||||
** Written by Matthew Conte
|
||||
** http://tlsf.baisoku.org
|
||||
**
|
||||
** Based on the original documentation by Miguel Masmano:
|
||||
** http://www.gii.upv.es/tlsf/main/docs
|
||||
**
|
||||
** This implementation was written to the specification
|
||||
** of the document, therefore no GPL restrictions apply.
|
||||
**
|
||||
** Copyright (c) 2006-2016, Matthew Conte
|
||||
** All rights reserved.
|
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without
|
||||
** modification, are permitted provided that the following conditions are met:
|
||||
** * Redistributions of source code must retain the above copyright
|
||||
** notice, this list of conditions and the following disclaimer.
|
||||
** * Redistributions in binary form must reproduce the above copyright
|
||||
** notice, this list of conditions and the following disclaimer in the
|
||||
** documentation and/or other materials provided with the distribution.
|
||||
** * Neither the name of the copyright holder nor the
|
||||
** names of its contributors may be used to endorse or promote products
|
||||
** derived from this software without specific prior written permission.
|
||||
**
|
||||
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
|
||||
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
/*
|
||||
** Data structures and associated constants.
|
||||
*/
|
||||
|
||||
/*
|
||||
** Since block sizes are always at least a multiple of 4, the two least
|
||||
** significant bits of the size field are used to store the block status:
|
||||
** - bit 0: whether block is busy or free
|
||||
** - bit 1: whether previous block is busy or free
|
||||
*/
|
||||
#define block_header_free_bit (1 << 0)
|
||||
#define block_header_prev_free_bit (1 << 1)
|
||||
|
||||
/*
|
||||
** The size of the block header exposed to used blocks is the size field.
|
||||
** The prev_phys_block field is stored *inside* the previous free block.
|
||||
*/
|
||||
#define block_header_overhead (sizeof(size_t))
|
||||
|
||||
/* User data starts directly after the size field in a used block. */
|
||||
#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))
|
||||
|
||||
/*
|
||||
** A free block must be large enough to store its header minus the size of
|
||||
** the prev_phys_block field, and no larger than the number of addressable
|
||||
** bits for FL_INDEX.
|
||||
** The block_size_max macro returns the maximum block for the minimum pool
|
||||
** use tlsf_block_size_max for a value specific to the pool
|
||||
*/
|
||||
#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
|
||||
#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)
|
||||
|
||||
/*
|
||||
** block_header_t member functions.
|
||||
*/
|
||||
static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
|
||||
{
|
||||
return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_set_size(block_header_t* block, size_t size)
|
||||
{
|
||||
const size_t oldsize = block->size;
|
||||
block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) int block_is_last(const block_header_t* block)
|
||||
{
|
||||
return block_size(block) == 0;
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
|
||||
{
|
||||
return tlsf_cast(int, block->size & block_header_free_bit);
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_set_free(block_header_t* block)
|
||||
{
|
||||
block->size |= block_header_free_bit;
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_set_used(block_header_t* block)
|
||||
{
|
||||
block->size &= ~block_header_free_bit;
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
|
||||
{
|
||||
return tlsf_cast(int, block->size & block_header_prev_free_bit);
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_set_prev_free(block_header_t* block)
|
||||
{
|
||||
block->size |= block_header_prev_free_bit;
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_set_prev_used(block_header_t* block)
|
||||
{
|
||||
block->size &= ~block_header_prev_free_bit;
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) block_header_t* block_from_ptr(const void* ptr)
|
||||
{
|
||||
return tlsf_cast(block_header_t*,
|
||||
tlsf_cast(unsigned char*, ptr) - block_start_offset);
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
|
||||
{
|
||||
return tlsf_cast(void*,
|
||||
tlsf_cast(unsigned char*, block) + block_start_offset);
|
||||
}
|
||||
|
||||
/* Return location of next block after block of given size. */
|
||||
static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
|
||||
{
|
||||
return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
|
||||
}
|
||||
|
||||
/* Return location of previous block. */
|
||||
static inline __attribute__((__always_inline__)) block_header_t* block_prev(const block_header_t* block)
|
||||
{
|
||||
return block->prev_phys_block;
|
||||
}
|
||||
|
||||
/* Return location of next existing block. */
|
||||
static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
|
||||
{
|
||||
block_header_t* next = offset_to_block(block_to_ptr(block),
|
||||
block_size(block) - block_header_overhead);
|
||||
return next;
|
||||
}
|
||||
|
||||
/* Link a new block with its physical neighbor, return the neighbor. */
|
||||
static inline __attribute__((__always_inline__)) block_header_t* block_link_next(block_header_t* block)
|
||||
{
|
||||
block_header_t* next = block_next(block);
|
||||
next->prev_phys_block = block;
|
||||
return next;
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_mark_as_free(block_header_t* block)
|
||||
{
|
||||
/* Link the block to the next block, first. */
|
||||
block_header_t* next = block_link_next(block);
|
||||
block_set_prev_free(next);
|
||||
block_set_free(block);
|
||||
}
|
||||
|
||||
static inline __attribute__((__always_inline__)) void block_mark_as_used(block_header_t* block)
|
||||
{
|
||||
block_header_t* next = block_next(block);
|
||||
block_set_prev_used(next);
|
||||
block_set_used(block);
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
/*
|
||||
** Two Level Segregated Fit memory allocator, version 3.1.
|
||||
** Written by Matthew Conte
|
||||
** http://tlsf.baisoku.org
|
||||
**
|
||||
** Based on the original documentation by Miguel Masmano:
|
||||
** http://www.gii.upv.es/tlsf/main/docs
|
||||
**
|
||||
** This implementation was written to the specification
|
||||
** of the document, therefore no GPL restrictions apply.
|
||||
**
|
||||
** Copyright (c) 2006-2016, Matthew Conte
|
||||
** All rights reserved.
|
||||
**
|
||||
** Redistribution and use in source and binary forms, with or without
|
||||
** modification, are permitted provided that the following conditions are met:
|
||||
** * Redistributions of source code must retain the above copyright
|
||||
** notice, this list of conditions and the following disclaimer.
|
||||
** * Redistributions in binary form must reproduce the above copyright
|
||||
** notice, this list of conditions and the following disclaimer in the
|
||||
** documentation and/or other materials provided with the distribution.
|
||||
** * Neither the name of the copyright holder nor the
|
||||
** names of its contributors may be used to endorse or promote products
|
||||
** derived from this software without specific prior written permission.
|
||||
**
|
||||
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
|
||||
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
enum tlsf_config
|
||||
{
|
||||
/* log2 of number of linear subdivisions of block sizes. Larger
|
||||
** values require more memory in the control structure. Values of
|
||||
** 4 or 5 are typical, 3 is for very small pools.
|
||||
*/
|
||||
SL_INDEX_COUNT_LOG2_MIN = 3,
|
||||
|
||||
/* All allocation sizes and addresses are aligned to 4 bytes. */
|
||||
ALIGN_SIZE_LOG2 = 2,
|
||||
ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
|
||||
|
||||
/*
|
||||
** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
|
||||
** However, because we linearly subdivide the second-level lists, and
|
||||
** our minimum size granularity is 4 bytes, it doesn't make sense to
|
||||
** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
|
||||
** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
|
||||
** trying to split size ranges into more slots than we have available.
|
||||
** Instead, we calculate the minimum threshold size, and place all
|
||||
** blocks below that size into the 0th first-level list.
|
||||
** Values below are the absolute minimum to accept a pool addition
|
||||
*/
|
||||
FL_INDEX_MAX_MIN = 14, // For a less than 16kB pool
|
||||
SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN),
|
||||
FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1),
|
||||
};
|
||||
@@ -1,255 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <string.h>
|
||||
#include <sdkconfig.h>
|
||||
|
||||
#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
|
||||
#include "esp_heap_trace.h"
|
||||
#undef HEAP_TRACE_SRCFILE
|
||||
|
||||
#include "esp_attr.h"
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
|
||||
|
||||
#define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH
|
||||
|
||||
#if CONFIG_HEAP_TRACING_STANDALONE
|
||||
|
||||
static portMUX_TYPE trace_mux = portMUX_INITIALIZER_UNLOCKED;
|
||||
static bool tracing;
|
||||
static heap_trace_mode_t mode;
|
||||
|
||||
/* Buffer used for records, starting at offset 0
|
||||
*/
|
||||
static heap_trace_record_t *buffer;
|
||||
static size_t total_records;
|
||||
|
||||
/* Count of entries logged in the buffer.
|
||||
|
||||
Maximum total_records
|
||||
*/
|
||||
static size_t count;
|
||||
|
||||
/* Actual number of allocations logged */
|
||||
static size_t total_allocations;
|
||||
|
||||
/* Actual number of frees logged */
|
||||
static size_t total_frees;
|
||||
|
||||
/* Has the buffer overflowed and lost trace entries? */
|
||||
static bool has_overflowed = false;
|
||||
|
||||
esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
|
||||
{
|
||||
if (tracing) {
|
||||
return ESP_ERR_INVALID_STATE;
|
||||
}
|
||||
buffer = record_buffer;
|
||||
total_records = num_records;
|
||||
memset(buffer, 0, num_records * sizeof(heap_trace_record_t));
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
|
||||
{
|
||||
if (buffer == NULL || total_records == 0) {
|
||||
return ESP_ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
portENTER_CRITICAL(&trace_mux);
|
||||
|
||||
tracing = false;
|
||||
mode = mode_param;
|
||||
count = 0;
|
||||
total_allocations = 0;
|
||||
total_frees = 0;
|
||||
has_overflowed = false;
|
||||
heap_trace_resume();
|
||||
|
||||
portEXIT_CRITICAL(&trace_mux);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static esp_err_t set_tracing(bool enable)
|
||||
{
|
||||
if (tracing == enable) {
|
||||
return ESP_ERR_INVALID_STATE;
|
||||
}
|
||||
tracing = enable;
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
esp_err_t heap_trace_stop(void)
|
||||
{
|
||||
return set_tracing(false);
|
||||
}
|
||||
|
||||
esp_err_t heap_trace_resume(void)
|
||||
{
|
||||
return set_tracing(true);
|
||||
}
|
||||
|
||||
size_t heap_trace_get_count(void)
|
||||
{
|
||||
return count;
|
||||
}
|
||||
|
||||
esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
|
||||
{
|
||||
if (record == NULL) {
|
||||
return ESP_ERR_INVALID_STATE;
|
||||
}
|
||||
esp_err_t result = ESP_OK;
|
||||
|
||||
portENTER_CRITICAL(&trace_mux);
|
||||
if (index >= count) {
|
||||
result = ESP_ERR_INVALID_ARG; /* out of range for 'count' */
|
||||
} else {
|
||||
memcpy(record, &buffer[index], sizeof(heap_trace_record_t));
|
||||
}
|
||||
portEXIT_CRITICAL(&trace_mux);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
void heap_trace_dump(void)
|
||||
{
|
||||
size_t delta_size = 0;
|
||||
size_t delta_allocs = 0;
|
||||
printf("%u allocations trace (%u entry buffer)\n",
|
||||
count, total_records);
|
||||
size_t start_count = count;
|
||||
for (int i = 0; i < count; i++) {
|
||||
heap_trace_record_t *rec = &buffer[i];
|
||||
|
||||
if (rec->address != NULL) {
|
||||
printf("%d bytes (@ %p) allocated CPU %d ccount 0x%08x caller ",
|
||||
rec->size, rec->address, rec->ccount & 1, rec->ccount & ~3);
|
||||
for (int j = 0; j < STACK_DEPTH && rec->alloced_by[j] != 0; j++) {
|
||||
printf("%p%s", rec->alloced_by[j],
|
||||
(j < STACK_DEPTH - 1) ? ":" : "");
|
||||
}
|
||||
|
||||
if (mode != HEAP_TRACE_ALL || STACK_DEPTH == 0 || rec->freed_by[0] == NULL) {
|
||||
delta_size += rec->size;
|
||||
delta_allocs++;
|
||||
printf("\n");
|
||||
} else {
|
||||
printf("\nfreed by ");
|
||||
for (int j = 0; j < STACK_DEPTH; j++) {
|
||||
printf("%p%s", rec->freed_by[j],
|
||||
(j < STACK_DEPTH - 1) ? ":" : "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (mode == HEAP_TRACE_ALL) {
|
||||
printf("%u bytes alive in trace (%u/%u allocations)\n",
|
||||
delta_size, delta_allocs, heap_trace_get_count());
|
||||
} else {
|
||||
printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
|
||||
}
|
||||
printf("total allocations %u total frees %u\n", total_allocations, total_frees);
|
||||
if (start_count != count) { // only a problem if trace isn't stopped before dumping
|
||||
printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
|
||||
}
|
||||
if (has_overflowed) {
|
||||
printf("(NB: Buffer has overflowed, so trace data is incomplete.)\n");
|
||||
}
|
||||
}
|
||||
|
||||
/* Add a new allocation to the heap trace records */
|
||||
static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
|
||||
{
|
||||
if (!tracing || record->address == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
portENTER_CRITICAL(&trace_mux);
|
||||
if (tracing) {
|
||||
if (count == total_records) {
|
||||
has_overflowed = true;
|
||||
/* Move the whole buffer back one slot.
|
||||
|
||||
This is a bit slow, compared to treating this buffer as a ringbuffer and rotating a head pointer.
|
||||
|
||||
However, ringbuffer code gets tricky when we remove elements in mid-buffer (for leak trace mode) while
|
||||
trying to keep track of an item count that may overflow.
|
||||
*/
|
||||
memmove(&buffer[0], &buffer[1], sizeof(heap_trace_record_t) * (total_records -1));
|
||||
count--;
|
||||
}
|
||||
// Copy new record into place
|
||||
memcpy(&buffer[count], record, sizeof(heap_trace_record_t));
|
||||
count++;
|
||||
total_allocations++;
|
||||
}
|
||||
portEXIT_CRITICAL(&trace_mux);
|
||||
}
|
||||
|
||||
// remove a record, used when freeing
|
||||
static void remove_record(int index);
|
||||
|
||||
/* record a free event in the heap trace log
|
||||
|
||||
For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
|
||||
For HEAP_TRACE_LEAKS, this means removing the record from the log.
|
||||
*/
|
||||
static IRAM_ATTR void record_free(void *p, void **callers)
|
||||
{
|
||||
if (!tracing || p == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
portENTER_CRITICAL(&trace_mux);
|
||||
if (tracing && count > 0) {
|
||||
total_frees++;
|
||||
/* search backwards for the allocation record matching this free */
|
||||
int i;
|
||||
for (i = count - 1; i >= 0; i--) {
|
||||
if (buffer[i].address == p) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i >= 0) {
|
||||
if (mode == HEAP_TRACE_ALL) {
|
||||
memcpy(buffer[i].freed_by, callers, sizeof(void *) * STACK_DEPTH);
|
||||
} else { // HEAP_TRACE_LEAKS
|
||||
// Leak trace mode, once an allocation is freed we remove it from the list
|
||||
remove_record(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
portEXIT_CRITICAL(&trace_mux);
|
||||
}
|
||||
|
||||
/* remove the entry at 'index' from the ringbuffer of saved records */
|
||||
static IRAM_ATTR void remove_record(int index)
|
||||
{
|
||||
if (index < count - 1) {
|
||||
// Remove the buffer entry from the list
|
||||
memmove(&buffer[index], &buffer[index+1],
|
||||
sizeof(heap_trace_record_t) * (total_records - index - 1));
|
||||
} else {
|
||||
// For last element, just zero it out to avoid ambiguity
|
||||
memset(&buffer[index], 0, sizeof(heap_trace_record_t));
|
||||
}
|
||||
count--;
|
||||
}
|
||||
|
||||
#include "heap_trace.inc"
|
||||
|
||||
#endif /*CONFIG_HEAP_TRACING_STANDALONE*/
|
||||
@@ -1,402 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include "multi_heap.h"
|
||||
#include <sdkconfig.h>
|
||||
#include "esp_err.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Flags to indicate the capabilities of the various memory systems
|
||||
*/
|
||||
#define MALLOC_CAP_EXEC (1<<0) ///< Memory must be able to run executable code
|
||||
#define MALLOC_CAP_32BIT (1<<1) ///< Memory must allow for aligned 32-bit data accesses
|
||||
#define MALLOC_CAP_8BIT (1<<2) ///< Memory must allow for 8/16/...-bit data accesses
|
||||
#define MALLOC_CAP_DMA (1<<3) ///< Memory must be able to accessed by DMA
|
||||
#define MALLOC_CAP_PID2 (1<<4) ///< Memory must be mapped to PID2 memory space (PIDs are not currently used)
|
||||
#define MALLOC_CAP_PID3 (1<<5) ///< Memory must be mapped to PID3 memory space (PIDs are not currently used)
|
||||
#define MALLOC_CAP_PID4 (1<<6) ///< Memory must be mapped to PID4 memory space (PIDs are not currently used)
|
||||
#define MALLOC_CAP_PID5 (1<<7) ///< Memory must be mapped to PID5 memory space (PIDs are not currently used)
|
||||
#define MALLOC_CAP_PID6 (1<<8) ///< Memory must be mapped to PID6 memory space (PIDs are not currently used)
|
||||
#define MALLOC_CAP_PID7 (1<<9) ///< Memory must be mapped to PID7 memory space (PIDs are not currently used)
|
||||
#define MALLOC_CAP_SPIRAM (1<<10) ///< Memory must be in SPI RAM
|
||||
#define MALLOC_CAP_INTERNAL (1<<11) ///< Memory must be internal; specifically it should not disappear when flash/spiram cache is switched off
|
||||
#define MALLOC_CAP_DEFAULT (1<<12) ///< Memory can be returned in a non-capability-specific memory allocation (e.g. malloc(), calloc()) call
|
||||
#define MALLOC_CAP_IRAM_8BIT (1<<13) ///< Memory must be in IRAM and allow unaligned access
|
||||
#define MALLOC_CAP_RETENTION (1<<14)
|
||||
|
||||
#define MALLOC_CAP_INVALID (1<<31) ///< Memory can't be used / list end marker
|
||||
|
||||
/**
|
||||
* @brief callback called when a allocation operation fails, if registered
|
||||
* @param size in bytes of failed allocation
|
||||
* @param caps capabillites requested of failed allocation
|
||||
* @param function_name function which generated the failure
|
||||
*/
|
||||
typedef void (*esp_alloc_failed_hook_t) (size_t size, uint32_t caps, const char * function_name);
|
||||
|
||||
/**
|
||||
* @brief registers a callback function to be invoked if a memory allocation operation fails
|
||||
* @param callback caller defined callback to be invoked
|
||||
* @return ESP_OK if callback was registered.
|
||||
*/
|
||||
esp_err_t heap_caps_register_failed_alloc_callback(esp_alloc_failed_hook_t callback);
|
||||
|
||||
/**
|
||||
* @brief Allocate a chunk of memory which has the given capabilities
|
||||
*
|
||||
* Equivalent semantics to libc malloc(), for capability-aware memory.
|
||||
*
|
||||
* In IDF, ``malloc(p)`` is equivalent to ``heap_caps_malloc(p, MALLOC_CAP_8BIT)``.
|
||||
*
|
||||
* @param size Size, in bytes, of the amount of memory to allocate
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory to be returned
|
||||
*
|
||||
* @return A pointer to the memory allocated on success, NULL on failure
|
||||
*/
|
||||
void *heap_caps_malloc(size_t size, uint32_t caps);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Free memory previously allocated via heap_caps_malloc() or heap_caps_realloc().
|
||||
*
|
||||
* Equivalent semantics to libc free(), for capability-aware memory.
|
||||
*
|
||||
* In IDF, ``free(p)`` is equivalent to ``heap_caps_free(p)``.
|
||||
*
|
||||
* @param ptr Pointer to memory previously returned from heap_caps_malloc() or heap_caps_realloc(). Can be NULL.
|
||||
*/
|
||||
void heap_caps_free( void *ptr);
|
||||
|
||||
/**
|
||||
* @brief Reallocate memory previously allocated via heap_caps_malloc() or heap_caps_realloc().
|
||||
*
|
||||
* Equivalent semantics to libc realloc(), for capability-aware memory.
|
||||
*
|
||||
* In IDF, ``realloc(p, s)`` is equivalent to ``heap_caps_realloc(p, s, MALLOC_CAP_8BIT)``.
|
||||
*
|
||||
* 'caps' parameter can be different to the capabilities that any original 'ptr' was allocated with. In this way,
|
||||
* realloc can be used to "move" a buffer if necessary to ensure it meets a new set of capabilities.
|
||||
*
|
||||
* @param ptr Pointer to previously allocated memory, or NULL for a new allocation.
|
||||
* @param size Size of the new buffer requested, or 0 to free the buffer.
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory desired for the new allocation.
|
||||
*
|
||||
* @return Pointer to a new buffer of size 'size' with capabilities 'caps', or NULL if allocation failed.
|
||||
*/
|
||||
void *heap_caps_realloc( void *ptr, size_t size, uint32_t caps);
|
||||
|
||||
/**
|
||||
* @brief Allocate a aligned chunk of memory which has the given capabilities
|
||||
*
|
||||
* Equivalent semantics to libc aligned_alloc(), for capability-aware memory.
|
||||
* @param alignment How the pointer received needs to be aligned
|
||||
* must be a power of two
|
||||
* @param size Size, in bytes, of the amount of memory to allocate
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory to be returned
|
||||
*
|
||||
* @return A pointer to the memory allocated on success, NULL on failure
|
||||
*
|
||||
*
|
||||
*/
|
||||
void *heap_caps_aligned_alloc(size_t alignment, size_t size, uint32_t caps);
|
||||
|
||||
/**
|
||||
* @brief Used to deallocate memory previously allocated with heap_caps_aligned_alloc
|
||||
*
|
||||
* @param ptr Pointer to the memory allocated
|
||||
* @note This function is deprecated, plase consider using heap_caps_free() instead
|
||||
*/
|
||||
void __attribute__((deprecated)) heap_caps_aligned_free(void *ptr);
|
||||
|
||||
/**
|
||||
* @brief Allocate a aligned chunk of memory which has the given capabilities. The initialized value in the memory is set to zero.
|
||||
*
|
||||
* @param alignment How the pointer received needs to be aligned
|
||||
* must be a power of two
|
||||
* @param n Number of continuing chunks of memory to allocate
|
||||
* @param size Size, in bytes, of a chunk of memory to allocate
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory to be returned
|
||||
*
|
||||
* @return A pointer to the memory allocated on success, NULL on failure
|
||||
*
|
||||
*/
|
||||
void *heap_caps_aligned_calloc(size_t alignment, size_t n, size_t size, uint32_t caps);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Allocate a chunk of memory which has the given capabilities. The initialized value in the memory is set to zero.
|
||||
*
|
||||
* Equivalent semantics to libc calloc(), for capability-aware memory.
|
||||
*
|
||||
* In IDF, ``calloc(p)`` is equivalent to ``heap_caps_calloc(p, MALLOC_CAP_8BIT)``.
|
||||
*
|
||||
* @param n Number of continuing chunks of memory to allocate
|
||||
* @param size Size, in bytes, of a chunk of memory to allocate
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory to be returned
|
||||
*
|
||||
* @return A pointer to the memory allocated on success, NULL on failure
|
||||
*/
|
||||
void *heap_caps_calloc(size_t n, size_t size, uint32_t caps);
|
||||
|
||||
/**
|
||||
* @brief Get the total size of all the regions that have the given capabilities
|
||||
*
|
||||
* This function takes all regions capable of having the given capabilities allocated in them
|
||||
* and adds up the total space they have.
|
||||
*
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*
|
||||
* @return total size in bytes
|
||||
*/
|
||||
|
||||
size_t heap_caps_get_total_size(uint32_t caps);
|
||||
|
||||
/**
|
||||
* @brief Get the total free size of all the regions that have the given capabilities
|
||||
*
|
||||
* This function takes all regions capable of having the given capabilities allocated in them
|
||||
* and adds up the free space they have.
|
||||
*
|
||||
* Note that because of heap fragmentation it is probably not possible to allocate a single block of memory
|
||||
* of this size. Use heap_caps_get_largest_free_block() for this purpose.
|
||||
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*
|
||||
* @return Amount of free bytes in the regions
|
||||
*/
|
||||
size_t heap_caps_get_free_size( uint32_t caps );
|
||||
|
||||
|
||||
/**
|
||||
* @brief Get the total minimum free memory of all regions with the given capabilities
|
||||
*
|
||||
* This adds all the low water marks of the regions capable of delivering the memory
|
||||
* with the given capabilities.
|
||||
*
|
||||
* Note the result may be less than the global all-time minimum available heap of this kind, as "low water marks" are
|
||||
* tracked per-region. Individual regions' heaps may have reached their "low water marks" at different points in time. However
|
||||
* this result still gives a "worst case" indication for all-time minimum free heap.
|
||||
*
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*
|
||||
* @return Amount of free bytes in the regions
|
||||
*/
|
||||
size_t heap_caps_get_minimum_free_size( uint32_t caps );
|
||||
|
||||
/**
|
||||
* @brief Get the largest free block of memory able to be allocated with the given capabilities.
|
||||
*
|
||||
* Returns the largest value of ``s`` for which ``heap_caps_malloc(s, caps)`` will succeed.
|
||||
*
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*
|
||||
* @return Size of largest free block in bytes.
|
||||
*/
|
||||
size_t heap_caps_get_largest_free_block( uint32_t caps );
|
||||
|
||||
|
||||
/**
|
||||
* @brief Get heap info for all regions with the given capabilities.
|
||||
*
|
||||
* Calls multi_heap_info() on all heaps which share the given capabilities. The information returned is an aggregate
|
||||
* across all matching heaps. The meanings of fields are the same as defined for multi_heap_info_t, except that
|
||||
* ``minimum_free_bytes`` has the same caveats described in heap_caps_get_minimum_free_size().
|
||||
*
|
||||
* @param info Pointer to a structure which will be filled with relevant
|
||||
* heap metadata.
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*
|
||||
*/
|
||||
void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps );
|
||||
|
||||
|
||||
/**
|
||||
* @brief Print a summary of all memory with the given capabilities.
|
||||
*
|
||||
* Calls multi_heap_info on all heaps which share the given capabilities, and
|
||||
* prints a two-line summary for each, then a total summary.
|
||||
*
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*
|
||||
*/
|
||||
void heap_caps_print_heap_info( uint32_t caps );
|
||||
|
||||
/**
|
||||
* @brief Check integrity of all heap memory in the system.
|
||||
*
|
||||
* Calls multi_heap_check on all heaps. Optionally print errors if heaps are corrupt.
|
||||
*
|
||||
* Calling this function is equivalent to calling heap_caps_check_integrity
|
||||
* with the caps argument set to MALLOC_CAP_INVALID.
|
||||
*
|
||||
* @param print_errors Print specific errors if heap corruption is found.
|
||||
*
|
||||
* @return True if all heaps are valid, False if at least one heap is corrupt.
|
||||
*/
|
||||
bool heap_caps_check_integrity_all(bool print_errors);
|
||||
|
||||
/**
|
||||
* @brief Check integrity of all heaps with the given capabilities.
|
||||
*
|
||||
* Calls multi_heap_check on all heaps which share the given capabilities. Optionally
|
||||
* print errors if the heaps are corrupt.
|
||||
*
|
||||
* See also heap_caps_check_integrity_all to check all heap memory
|
||||
* in the system and heap_caps_check_integrity_addr to check memory
|
||||
* around a single address.
|
||||
*
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
* @param print_errors Print specific errors if heap corruption is found.
|
||||
*
|
||||
* @return True if all heaps are valid, False if at least one heap is corrupt.
|
||||
*/
|
||||
bool heap_caps_check_integrity(uint32_t caps, bool print_errors);
|
||||
|
||||
/**
|
||||
* @brief Check integrity of heap memory around a given address.
|
||||
*
|
||||
* This function can be used to check the integrity of a single region of heap memory,
|
||||
* which contains the given address.
|
||||
*
|
||||
* This can be useful if debugging heap integrity for corruption at a known address,
|
||||
* as it has a lower overhead than checking all heap regions. Note that if the corrupt
|
||||
* address moves around between runs (due to timing or other factors) then this approach
|
||||
* won't work and you should call heap_caps_check_integrity or
|
||||
* heap_caps_check_integrity_all instead.
|
||||
*
|
||||
* @note The entire heap region around the address is checked, not only the adjacent
|
||||
* heap blocks.
|
||||
*
|
||||
* @param addr Address in memory. Check for corruption in region containing this address.
|
||||
* @param print_errors Print specific errors if heap corruption is found.
|
||||
*
|
||||
* @return True if the heap containing the specified address is valid,
|
||||
* False if at least one heap is corrupt or the address doesn't belong to a heap region.
|
||||
*/
|
||||
bool heap_caps_check_integrity_addr(intptr_t addr, bool print_errors);
|
||||
|
||||
/**
|
||||
* @brief Enable malloc() in external memory and set limit below which
|
||||
* malloc() attempts are placed in internal memory.
|
||||
*
|
||||
* When external memory is in use, the allocation strategy is to initially try to
|
||||
* satisfy smaller allocation requests with internal memory and larger requests
|
||||
* with external memory. This sets the limit between the two, as well as generally
|
||||
* enabling allocation in external memory.
|
||||
*
|
||||
* @param limit Limit, in bytes.
|
||||
*/
|
||||
void heap_caps_malloc_extmem_enable(size_t limit);
|
||||
|
||||
/**
|
||||
* @brief Allocate a chunk of memory as preference in decreasing order.
|
||||
*
|
||||
* @attention The variable parameters are bitwise OR of MALLOC_CAP_* flags indicating the type of memory.
|
||||
* This API prefers to allocate memory with the first parameter. If failed, allocate memory with
|
||||
* the next parameter. It will try in this order until allocating a chunk of memory successfully
|
||||
* or fail to allocate memories with any of the parameters.
|
||||
*
|
||||
* @param size Size, in bytes, of the amount of memory to allocate
|
||||
* @param num Number of variable paramters
|
||||
*
|
||||
* @return A pointer to the memory allocated on success, NULL on failure
|
||||
*/
|
||||
void *heap_caps_malloc_prefer( size_t size, size_t num, ... );
|
||||
|
||||
/**
|
||||
* @brief Allocate a chunk of memory as preference in decreasing order.
|
||||
*
|
||||
* @param ptr Pointer to previously allocated memory, or NULL for a new allocation.
|
||||
* @param size Size of the new buffer requested, or 0 to free the buffer.
|
||||
* @param num Number of variable paramters
|
||||
*
|
||||
* @return Pointer to a new buffer of size 'size', or NULL if allocation failed.
|
||||
*/
|
||||
void *heap_caps_realloc_prefer( void *ptr, size_t size, size_t num, ... );
|
||||
|
||||
/**
|
||||
* @brief Allocate a chunk of memory as preference in decreasing order.
|
||||
*
|
||||
* @param n Number of continuing chunks of memory to allocate
|
||||
* @param size Size, in bytes, of a chunk of memory to allocate
|
||||
* @param num Number of variable paramters
|
||||
*
|
||||
* @return A pointer to the memory allocated on success, NULL on failure
|
||||
*/
|
||||
void *heap_caps_calloc_prefer( size_t n, size_t size, size_t num, ... );
|
||||
|
||||
/**
|
||||
* @brief Dump the full structure of all heaps with matching capabilities.
|
||||
*
|
||||
* Prints a large amount of output to serial (because of locking limitations,
|
||||
* the output bypasses stdout/stderr). For each (variable sized) block
|
||||
* in each matching heap, the following output is printed on a single line:
|
||||
*
|
||||
* - Block address (the data buffer returned by malloc is 4 bytes after this
|
||||
* if heap debugging is set to Basic, or 8 bytes otherwise).
|
||||
* - Data size (the data size may be larger than the size requested by malloc,
|
||||
* either due to heap fragmentation or because of heap debugging level).
|
||||
* - Address of next block in the heap.
|
||||
* - If the block is free, the address of the next free block is also printed.
|
||||
*
|
||||
* @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type
|
||||
* of memory
|
||||
*/
|
||||
void heap_caps_dump(uint32_t caps);
|
||||
|
||||
/**
|
||||
* @brief Dump the full structure of all heaps.
|
||||
*
|
||||
* Covers all registered heaps. Prints a large amount of output to serial.
|
||||
*
|
||||
* Output is the same as for heap_caps_dump.
|
||||
*
|
||||
*/
|
||||
void heap_caps_dump_all(void);
|
||||
|
||||
/**
|
||||
* @brief Return the size that a particular pointer was allocated with.
|
||||
*
|
||||
* @param ptr Pointer to currently allocated heap memory. Must be a pointer value previously
|
||||
* returned by heap_caps_malloc,malloc,calloc, etc. and not yet freed.
|
||||
*
|
||||
* @note The app will crash with an assertion failure if the pointer is not valid.
|
||||
*
|
||||
* @return Size of the memory allocated at this block.
|
||||
*
|
||||
*/
|
||||
size_t heap_caps_get_allocated_size( void *ptr );
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,92 +0,0 @@
|
||||
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#include "esp_err.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "soc/soc_memory_layout.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @brief Initialize the capability-aware heap allocator.
|
||||
*
|
||||
* This is called once in the IDF startup code. Do not call it
|
||||
* at other times.
|
||||
*/
|
||||
void heap_caps_init(void);
|
||||
|
||||
/**
|
||||
* @brief Enable heap(s) in memory regions where the startup stacks are located.
|
||||
*
|
||||
* On startup, the pro/app CPUs have a certain memory region they use as stack, so we
|
||||
* cannot do allocations in the regions these stack frames are. When FreeRTOS is
|
||||
* completely started, they do not use that memory anymore and heap(s) there can
|
||||
* be enabled.
|
||||
*/
|
||||
void heap_caps_enable_nonos_stack_heaps(void);
|
||||
|
||||
/**
|
||||
* @brief Add a region of memory to the collection of heaps at runtime.
|
||||
*
|
||||
* Most memory regions are defined in soc_memory_layout.c for the SoC,
|
||||
* and are registered via heap_caps_init(). Some regions can't be used
|
||||
* immediately and are later enabled via heap_caps_enable_nonos_stack_heaps().
|
||||
*
|
||||
* Call this function to add a region of memory to the heap at some later time.
|
||||
*
|
||||
* This function does not consider any of the "reserved" regions or other data in soc_memory_layout, caller needs to
|
||||
* consider this themselves.
|
||||
*
|
||||
* All memory within the region specified by start & end parameters must be otherwise unused.
|
||||
*
|
||||
* The capabilities of the newly registered memory will be determined by the start address, as looked up in the regions
|
||||
* specified in soc_memory_layout.c.
|
||||
*
|
||||
* Use heap_caps_add_region_with_caps() to register a region with custom capabilities.
|
||||
*
|
||||
* @param start Start address of new region.
|
||||
* @param end End address of new region.
|
||||
*
|
||||
* @return ESP_OK on success, ESP_ERR_INVALID_ARG if a parameter is invalid, ESP_ERR_NOT_FOUND if the
|
||||
* specified start address doesn't reside in a known region, or any error returned by heap_caps_add_region_with_caps().
|
||||
*/
|
||||
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end);
|
||||
|
||||
|
||||
/**
|
||||
* @brief Add a region of memory to the collection of heaps at runtime, with custom capabilities.
|
||||
*
|
||||
* Similar to heap_caps_add_region(), only custom memory capabilities are specified by the caller.
|
||||
*
|
||||
* @param caps Ordered array of capability masks for the new region, in order of priority. Must have length
|
||||
* SOC_MEMORY_TYPE_NO_PRIOS. Does not need to remain valid after the call returns.
|
||||
* @param start Start address of new region.
|
||||
* @param end End address of new region.
|
||||
*
|
||||
* @return
|
||||
* - ESP_OK on success
|
||||
* - ESP_ERR_INVALID_ARG if a parameter is invalid
|
||||
* - ESP_ERR_NO_MEM if no memory to register new heap.
|
||||
* - ESP_ERR_INVALID_SIZE if the memory region is too small to fit a heap
|
||||
* - ESP_FAIL if region overlaps the start and/or end of an existing region
|
||||
*/
|
||||
esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,98 +0,0 @@
|
||||
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#ifdef CONFIG_HEAP_TASK_TRACKING
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// This macro controls how much space is provided for partitioning the per-task
|
||||
// heap allocation info according to one or more sets of heap capabilities.
|
||||
#define NUM_HEAP_TASK_CAPS 4
|
||||
|
||||
/** @brief Structure to collect per-task heap allocation totals partitioned by selected caps */
|
||||
typedef struct {
|
||||
TaskHandle_t task; ///< Task to which these totals belong
|
||||
size_t size[NUM_HEAP_TASK_CAPS]; ///< Total allocations partitioned by selected caps
|
||||
size_t count[NUM_HEAP_TASK_CAPS]; ///< Number of blocks partitioned by selected caps
|
||||
} heap_task_totals_t;
|
||||
|
||||
/** @brief Structure providing details about a block allocated by a task */
|
||||
typedef struct {
|
||||
TaskHandle_t task; ///< Task that allocated the block
|
||||
void *address; ///< User address of allocated block
|
||||
uint32_t size; ///< Size of the allocated block
|
||||
} heap_task_block_t;
|
||||
|
||||
/** @brief Structure to provide parameters to heap_caps_get_per_task_info
|
||||
*
|
||||
* The 'caps' and 'mask' arrays allow partitioning the per-task heap allocation
|
||||
* totals by selected sets of heap region capabilities so that totals for
|
||||
* multiple regions can be accumulated in one scan. The capabilities flags for
|
||||
* each region ANDed with mask[i] are compared to caps[i] in order; the
|
||||
* allocations in that region are added to totals->size[i] and totals->count[i]
|
||||
* for the first i that matches. To collect the totals without any
|
||||
* partitioning, set mask[0] and caps[0] both to zero. The allocation totals
|
||||
* are returned in the 'totals' array of heap_task_totals_t structs. To allow
|
||||
* easily comparing the totals array between consecutive calls, that array can
|
||||
* be left populated from one call to the next so the order of tasks is the
|
||||
* same even if some tasks have freed their blocks or have been deleted. The
|
||||
* number of blocks prepopulated is given by num_totals, which is updated upon
|
||||
* return. If there are more tasks with allocations than the capacity of the
|
||||
* totals array (given by max_totals), information for the excess tasks will be
|
||||
* not be collected. The totals array pointer can be NULL if the totals are
|
||||
* not desired.
|
||||
*
|
||||
* The 'tasks' array holds a list of handles for tasks whose block details are
|
||||
* to be returned in the 'blocks' array of heap_task_block_t structs. If the
|
||||
* tasks array pointer is NULL, block details for all tasks will be returned up
|
||||
* to the capacity of the buffer array, given by max_blocks. The function
|
||||
* return value tells the number of blocks filled into the array. The blocks
|
||||
* array pointer can be NULL if block details are not desired, or max_blocks
|
||||
* can be set to zero.
|
||||
*/
|
||||
typedef struct {
|
||||
int32_t caps[NUM_HEAP_TASK_CAPS]; ///< Array of caps for partitioning task totals
|
||||
int32_t mask[NUM_HEAP_TASK_CAPS]; ///< Array of masks under which caps must match
|
||||
TaskHandle_t *tasks; ///< Array of tasks whose block info is returned
|
||||
size_t num_tasks; ///< Length of tasks array
|
||||
heap_task_totals_t *totals; ///< Array of structs to collect task totals
|
||||
size_t *num_totals; ///< Number of task structs currently in array
|
||||
size_t max_totals; ///< Capacity of array of task totals structs
|
||||
heap_task_block_t *blocks; ///< Array of task block details structs
|
||||
size_t max_blocks; ///< Capacity of array of task block info structs
|
||||
} heap_task_info_params_t;
|
||||
|
||||
/**
|
||||
* @brief Return per-task heap allocation totals and lists of blocks.
|
||||
*
|
||||
* For each task that has allocated memory from the heap, return totals for
|
||||
* allocations within regions matching one or more sets of capabilities.
|
||||
*
|
||||
* Optionally also return an array of structs providing details about each
|
||||
* block allocated by one or more requested tasks, or by all tasks.
|
||||
*
|
||||
* @param params Structure to hold all the parameters for the function
|
||||
* (@see heap_task_info_params_t).
|
||||
* @return Number of block detail structs returned (@see heap_task_block_t).
|
||||
*/
|
||||
extern size_t heap_caps_get_per_task_info(heap_task_info_params_t *params);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // CONFIG_HEAP_TASK_TRACKING
|
||||
@@ -1,154 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#include "sdkconfig.h"
|
||||
#include <stdint.h>
|
||||
#include <esp_err.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if !defined(CONFIG_HEAP_TRACING) && !defined(HEAP_TRACE_SRCFILE)
|
||||
#warning "esp_heap_trace.h is included but heap tracing is disabled in menuconfig, functions are no-ops"
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_HEAP_TRACING_STACK_DEPTH
|
||||
#define CONFIG_HEAP_TRACING_STACK_DEPTH 0
|
||||
#endif
|
||||
|
||||
typedef enum {
|
||||
HEAP_TRACE_ALL,
|
||||
HEAP_TRACE_LEAKS,
|
||||
} heap_trace_mode_t;
|
||||
|
||||
/**
|
||||
* @brief Trace record data type. Stores information about an allocated region of memory.
|
||||
*/
|
||||
typedef struct {
|
||||
uint32_t ccount; ///< CCOUNT of the CPU when the allocation was made. LSB (bit value 1) is the CPU number (0 or 1).
|
||||
void *address; ///< Address which was allocated
|
||||
size_t size; ///< Size of the allocation
|
||||
void *alloced_by[CONFIG_HEAP_TRACING_STACK_DEPTH]; ///< Call stack of the caller which allocated the memory.
|
||||
void *freed_by[CONFIG_HEAP_TRACING_STACK_DEPTH]; ///< Call stack of the caller which freed the memory (all zero if not freed.)
|
||||
} heap_trace_record_t;
|
||||
|
||||
/**
|
||||
* @brief Initialise heap tracing in standalone mode.
|
||||
*
|
||||
* This function must be called before any other heap tracing functions.
|
||||
*
|
||||
* To disable heap tracing and allow the buffer to be freed, stop tracing and then call heap_trace_init_standalone(NULL, 0);
|
||||
*
|
||||
* @param record_buffer Provide a buffer to use for heap trace data. Must remain valid any time heap tracing is enabled, meaning
|
||||
* it must be allocated from internal memory not in PSRAM.
|
||||
* @param num_records Size of the heap trace buffer, as number of record structures.
|
||||
* @return
|
||||
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
|
||||
* - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
|
||||
* - ESP_OK Heap tracing initialised successfully.
|
||||
*/
|
||||
esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records);
|
||||
|
||||
/**
|
||||
* @brief Initialise heap tracing in host-based mode.
|
||||
*
|
||||
* This function must be called before any other heap tracing functions.
|
||||
*
|
||||
* @return
|
||||
* - ESP_ERR_INVALID_STATE Heap tracing is currently in progress.
|
||||
* - ESP_OK Heap tracing initialised successfully.
|
||||
*/
|
||||
esp_err_t heap_trace_init_tohost(void);
|
||||
|
||||
/**
|
||||
* @brief Start heap tracing. All heap allocations & frees will be traced, until heap_trace_stop() is called.
|
||||
*
|
||||
* @note heap_trace_init_standalone() must be called to provide a valid buffer, before this function is called.
|
||||
*
|
||||
* @note Calling this function while heap tracing is running will reset the heap trace state and continue tracing.
|
||||
*
|
||||
* @param mode Mode for tracing.
|
||||
* - HEAP_TRACE_ALL means all heap allocations and frees are traced.
|
||||
* - HEAP_TRACE_LEAKS means only suspected memory leaks are traced. (When memory is freed, the record is removed from the trace buffer.)
|
||||
* @return
|
||||
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
|
||||
* - ESP_ERR_INVALID_STATE A non-zero-length buffer has not been set via heap_trace_init_standalone().
|
||||
* - ESP_OK Tracing is started.
|
||||
*/
|
||||
esp_err_t heap_trace_start(heap_trace_mode_t mode);
|
||||
|
||||
/**
|
||||
* @brief Stop heap tracing.
|
||||
*
|
||||
* @return
|
||||
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
|
||||
* - ESP_ERR_INVALID_STATE Heap tracing was not in progress.
|
||||
* - ESP_OK Heap tracing stopped.
|
||||
*/
|
||||
esp_err_t heap_trace_stop(void);
|
||||
|
||||
/**
|
||||
* @brief Resume heap tracing which was previously stopped.
|
||||
*
|
||||
* Unlike heap_trace_start(), this function does not clear the
|
||||
* buffer of any pre-existing trace records.
|
||||
*
|
||||
* The heap trace mode is the same as when heap_trace_start() was
|
||||
* last called (or HEAP_TRACE_ALL if heap_trace_start() was never called).
|
||||
*
|
||||
* @return
|
||||
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
|
||||
* - ESP_ERR_INVALID_STATE Heap tracing was already started.
|
||||
* - ESP_OK Heap tracing resumed.
|
||||
*/
|
||||
esp_err_t heap_trace_resume(void);
|
||||
|
||||
/**
|
||||
* @brief Return number of records in the heap trace buffer
|
||||
*
|
||||
* It is safe to call this function while heap tracing is running.
|
||||
*/
|
||||
size_t heap_trace_get_count(void);
|
||||
|
||||
/**
|
||||
* @brief Return a raw record from the heap trace buffer
|
||||
*
|
||||
* @note It is safe to call this function while heap tracing is running; however, in HEAP_TRACE_LEAKS mode record indexing may
|
||||
* skip entries unless heap tracing is stopped first.
|
||||
*
|
||||
* @param index Index (zero-based) of the record to return.
|
||||
* @param[out] record Record where the heap trace record will be copied.
|
||||
* @return
|
||||
* - ESP_ERR_NOT_SUPPORTED Project was compiled without heap tracing enabled in menuconfig.
|
||||
* - ESP_ERR_INVALID_STATE Heap tracing was not initialised.
|
||||
* - ESP_ERR_INVALID_ARG Index is out of bounds for current heap trace record count.
|
||||
* - ESP_OK Record returned successfully.
|
||||
*/
|
||||
esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record);
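/* Usage sketch (editorial illustration; assumes tracing has been stopped first so
   record indexing is stable):

       size_t count = heap_trace_get_count();
       for (size_t i = 0; i < count; i++) {
           heap_trace_record_t rec;
           if (heap_trace_get(i, &rec) == ESP_OK) {
               printf("%p: %u bytes\n", rec.address, (unsigned) rec.size);
           }
       }
*/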
|
||||
|
||||
/**
|
||||
* @brief Dump heap trace record data to stdout
|
||||
*
|
||||
* @note It is safe to call this function while heap tracing is running; however, in HEAP_TRACE_LEAKS mode the dump may skip
|
||||
* entries unless heap tracing is stopped first.
|
||||
*
|
||||
*
|
||||
*/
|
||||
void heap_trace_dump(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
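/* End-to-end usage sketch for this header (editorial illustration; assumes
   CONFIG_HEAP_TRACING is enabled in menuconfig and that a statically allocated
   record buffer in internal RAM is acceptable):

       #include "esp_err.h"
       #include "esp_heap_trace.h"

       #define NUM_RECORDS 100
       static heap_trace_record_t trace_records[NUM_RECORDS];   // internal RAM, not PSRAM

       void example_trace_leaks(void)
       {
           ESP_ERROR_CHECK(heap_trace_init_standalone(trace_records, NUM_RECORDS));
           ESP_ERROR_CHECK(heap_trace_start(HEAP_TRACE_LEAKS));
           // ... exercise the code suspected of leaking ...
           ESP_ERROR_CHECK(heap_trace_stop());
           heap_trace_dump();   // prints records for allocations that were never freed
       }
*/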
|
||||
@@ -1,200 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <string.h>
|
||||
#include <sdkconfig.h>
|
||||
#include "soc/soc_memory_layout.h"
|
||||
#include "esp_attr.h"
|
||||
|
||||
/* Encode the CPU ID in the LSB of the ccount value */
|
||||
inline static uint32_t get_ccount(void)
|
||||
{
|
||||
uint32_t ccount = cpu_hal_get_cycle_count() & ~3;
|
||||
#ifndef CONFIG_FREERTOS_UNICORE
|
||||
ccount |= xPortGetCoreID();
|
||||
#endif
|
||||
return ccount;
|
||||
}
|
||||
|
||||
/* Architecture-specific return value of __builtin_return_address which
|
||||
* should be interpreted as an invalid address.
|
||||
*/
|
||||
#ifdef __XTENSA__
|
||||
#define HEAP_ARCH_INVALID_PC 0x40000000
|
||||
#else
|
||||
#define HEAP_ARCH_INVALID_PC 0x00000000
|
||||
#endif
|
||||
|
||||
// Caller is 2 stack frames deeper than we care about
|
||||
#define STACK_OFFSET 2
|
||||
|
||||
#define TEST_STACK(N) do { \
|
||||
if (STACK_DEPTH == N) { \
|
||||
return; \
|
||||
} \
|
||||
callers[N] = __builtin_return_address(N+STACK_OFFSET); \
|
||||
if (!esp_ptr_executable(callers[N]) \
|
||||
|| callers[N] == (void*) HEAP_ARCH_INVALID_PC) { \
|
||||
callers[N] = 0; \
|
||||
return; \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
/* Static function to read the call stack for a traced heap call.
|
||||
|
||||
Calls to __builtin_return_address are "unrolled" via TEST_STACK macro as gcc requires the
|
||||
argument to be a compile-time constant.
|
||||
*/
|
||||
static IRAM_ATTR __attribute__((noinline)) void get_call_stack(void **callers)
|
||||
{
|
||||
bzero(callers, sizeof(void *) * STACK_DEPTH);
|
||||
TEST_STACK(0);
|
||||
TEST_STACK(1);
|
||||
TEST_STACK(2);
|
||||
TEST_STACK(3);
|
||||
TEST_STACK(4);
|
||||
TEST_STACK(5);
|
||||
TEST_STACK(6);
|
||||
TEST_STACK(7);
|
||||
TEST_STACK(8);
|
||||
TEST_STACK(9);
|
||||
}
|
||||
|
||||
_Static_assert(STACK_DEPTH >= 0 && STACK_DEPTH <= 10, "CONFIG_HEAP_TRACING_STACK_DEPTH must be in range 0-10");
|
||||
|
||||
|
||||
typedef enum {
|
||||
TRACE_MALLOC_CAPS,
|
||||
TRACE_MALLOC_DEFAULT
|
||||
} trace_malloc_mode_t;
|
||||
|
||||
|
||||
void *__real_heap_caps_malloc(size_t size, uint32_t caps);
|
||||
void *__real_heap_caps_malloc_default( size_t size );
|
||||
void *__real_heap_caps_realloc_default( void *ptr, size_t size );
|
||||
|
||||
/* trace any 'malloc' event */
|
||||
static IRAM_ATTR __attribute__((noinline)) void *trace_malloc(size_t size, uint32_t caps, trace_malloc_mode_t mode)
|
||||
{
|
||||
uint32_t ccount = get_ccount();
|
||||
void *p;
|
||||
|
||||
if ( mode == TRACE_MALLOC_CAPS ) {
|
||||
p = __real_heap_caps_malloc(size, caps);
|
||||
} else { //TRACE_MALLOC_DEFAULT
|
||||
p = __real_heap_caps_malloc_default(size);
|
||||
}
|
||||
|
||||
heap_trace_record_t rec = {
|
||||
.address = p,
|
||||
.ccount = ccount,
|
||||
.size = size,
|
||||
};
|
||||
get_call_stack(rec.alloced_by);
|
||||
record_allocation(&rec);
|
||||
return p;
|
||||
}
|
||||
|
||||
void __real_heap_caps_free(void *p);
|
||||
|
||||
/* trace any 'free' event */
|
||||
static IRAM_ATTR __attribute__((noinline)) void trace_free(void *p)
|
||||
{
|
||||
void *callers[STACK_DEPTH];
|
||||
get_call_stack(callers);
|
||||
record_free(p, callers);
|
||||
|
||||
__real_heap_caps_free(p);
|
||||
}
|
||||
|
||||
void * __real_heap_caps_realloc(void *p, size_t size, uint32_t caps);
|
||||
|
||||
/* trace any 'realloc' event */
|
||||
static IRAM_ATTR __attribute__((noinline)) void *trace_realloc(void *p, size_t size, uint32_t caps, trace_malloc_mode_t mode)
|
||||
{
|
||||
void *callers[STACK_DEPTH];
|
||||
uint32_t ccount = get_ccount();
|
||||
void *r;
|
||||
|
||||
/* trace realloc as free-then-alloc */
|
||||
get_call_stack(callers);
|
||||
record_free(p, callers);
|
||||
|
||||
if (mode == TRACE_MALLOC_CAPS ) {
|
||||
r = __real_heap_caps_realloc(p, size, caps);
|
||||
} else { //TRACE_MALLOC_DEFAULT
|
||||
r = __real_heap_caps_realloc_default(p, size);
|
||||
}
|
||||
/* realloc with zero size is a free */
|
||||
if (size != 0) {
|
||||
heap_trace_record_t rec = {
|
||||
.address = r,
|
||||
.ccount = ccount,
|
||||
.size = size,
|
||||
};
|
||||
memcpy(rec.alloced_by, callers, sizeof(void *) * STACK_DEPTH);
|
||||
record_allocation(&rec);
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Note: this changes the behaviour of libc malloc/realloc/free a bit,
|
||||
as they no longer go via the libc functions in ROM. But more or less
|
||||
the same in the end. */
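/* Editorial note (an assumption about the surrounding build, not part of this file): the
   __wrap_ / __real_ function pairs below rely on GNU ld symbol wrapping, i.e. the component
   is linked with flags along the lines of

       -Wl,--wrap=malloc -Wl,--wrap=free -Wl,--wrap=realloc -Wl,--wrap=calloc
       -Wl,--wrap=heap_caps_malloc -Wl,--wrap=heap_caps_free -Wl,--wrap=heap_caps_realloc

   so that calls to malloc() resolve to __wrap_malloc() while the original implementation
   remains reachable as __real_malloc(). The exact flag list used by the build system is
   not shown in this diff. */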
|
||||
|
||||
IRAM_ATTR void *__wrap_malloc(size_t size)
|
||||
{
|
||||
return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
|
||||
}
|
||||
|
||||
IRAM_ATTR void __wrap_free(void *p)
|
||||
{
|
||||
trace_free(p);
|
||||
}
|
||||
|
||||
IRAM_ATTR void *__wrap_realloc(void *p, size_t size)
|
||||
{
|
||||
return trace_realloc(p, size, 0, TRACE_MALLOC_DEFAULT);
|
||||
}
|
||||
|
||||
IRAM_ATTR void *__wrap_calloc(size_t nmemb, size_t size)
|
||||
{
|
||||
size = size * nmemb;
|
||||
void *result = trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
|
||||
if (result != NULL) {
|
||||
memset(result, 0, size);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
IRAM_ATTR void *__wrap_heap_caps_malloc(size_t size, uint32_t caps)
|
||||
{
|
||||
return trace_malloc(size, caps, TRACE_MALLOC_CAPS);
|
||||
}
|
||||
|
||||
void __wrap_heap_caps_free(void *p) __attribute__((alias("__wrap_free")));
|
||||
|
||||
IRAM_ATTR void *__wrap_heap_caps_realloc(void *p, size_t size, uint32_t caps)
|
||||
{
|
||||
return trace_realloc(p, size, caps, TRACE_MALLOC_CAPS);
|
||||
}
|
||||
|
||||
IRAM_ATTR void *__wrap_heap_caps_malloc_default( size_t size )
|
||||
{
|
||||
return trace_malloc(size, 0, TRACE_MALLOC_DEFAULT);
|
||||
}
|
||||
|
||||
IRAM_ATTR void *__wrap_heap_caps_realloc_default( void *ptr, size_t size )
|
||||
{
|
||||
return trace_realloc(ptr, size, 0, TRACE_MALLOC_DEFAULT);
|
||||
}
|
||||
@@ -1,190 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
/* multi_heap is a heap implementation for handling multiple
|
||||
heterogeneous heaps in a single program.
|
||||
|
||||
Any contiguous block of memory can be registered as a heap.
|
||||
*/
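/* Usage sketch (editorial illustration; assumes a static buffer is an acceptable
   memory region to register):

       static uint8_t heap_storage[4096];

       multi_heap_handle_t h = multi_heap_register(heap_storage, sizeof(heap_storage));
       if (h != NULL) {
           void *p = multi_heap_malloc(h, 128);
           multi_heap_free(h, p);
       }
*/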
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/** @brief Opaque handle to a registered heap */
|
||||
typedef struct multi_heap_info *multi_heap_handle_t;
|
||||
|
||||
/**
|
||||
* @brief allocate a chunk of memory with specific alignment
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param size size in bytes of memory chunk
|
||||
* @param alignment how the memory must be aligned
|
||||
*
|
||||
* @return pointer to the memory allocated, NULL on failure
|
||||
*/
|
||||
void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment);
|
||||
|
||||
/** @brief malloc() a buffer in a given heap
|
||||
*
|
||||
* Semantics are the same as standard malloc(), only the returned buffer will be allocated in the specified heap.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param size Size of desired buffer.
|
||||
*
|
||||
* @return Pointer to new memory, or NULL if allocation fails.
|
||||
*/
|
||||
void *multi_heap_malloc(multi_heap_handle_t heap, size_t size);
|
||||
|
||||
/** @brief free() an aligned buffer in a given heap.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param p NULL, or a pointer previously returned from multi_heap_aligned_alloc() for the same heap.
|
||||
* @note This function is deprecated, consider using multi_heap_free() instead
|
||||
*/
|
||||
void __attribute__((deprecated)) multi_heap_aligned_free(multi_heap_handle_t heap, void *p);
|
||||
|
||||
/** @brief free() a buffer in a given heap.
|
||||
*
|
||||
* Semantics are the same as standard free(), only the argument 'p' must be NULL or have been allocated in the specified heap.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
|
||||
*/
|
||||
void multi_heap_free(multi_heap_handle_t heap, void *p);
|
||||
|
||||
/** @brief realloc() a buffer in a given heap.
|
||||
*
|
||||
* Semantics are the same as standard realloc(), only the argument 'p' must be NULL or have been allocated in the specified heap.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
|
||||
* @param size Desired new size for buffer.
|
||||
*
|
||||
* @return New buffer of 'size' containing contents of 'p', or NULL if reallocation failed.
|
||||
*/
|
||||
void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size);
|
||||
|
||||
|
||||
/** @brief Return the size that a particular pointer was allocated with.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param p Pointer, must have been previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap.
|
||||
*
|
||||
* @return Size of the memory allocated at this block. May be more than the original size argument, due
|
||||
* to padding and minimum block sizes.
|
||||
*/
|
||||
size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p);
|
||||
|
||||
|
||||
/** @brief Register a new heap for use
|
||||
*
|
||||
* This function initialises a heap at the specified address, and returns a handle for future heap operations.
|
||||
*
|
||||
* There is no equivalent function for deregistering a heap - if all blocks in the heap are free, you can immediately start using the memory for other purposes.
|
||||
*
|
||||
* @param start Start address of the memory to use for a new heap.
|
||||
* @param size Size (in bytes) of the new heap.
|
||||
*
|
||||
* @return Handle of a new heap ready for use, or NULL if the heap region was too small to be initialised.
|
||||
*/
|
||||
multi_heap_handle_t multi_heap_register(void *start, size_t size);
|
||||
|
||||
|
||||
/** @brief Associate a private lock pointer with a heap
|
||||
*
|
||||
* The lock argument is supplied to the MULTI_HEAP_LOCK() and MULTI_HEAP_UNLOCK() macros, defined in multi_heap_platform.h.
|
||||
*
|
||||
* The lock in question must be recursive.
|
||||
*
|
||||
* When the heap is first registered, the associated lock is NULL.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param lock Optional pointer to a locking structure to associate with this heap.
|
||||
*/
|
||||
void multi_heap_set_lock(multi_heap_handle_t heap, void* lock);
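/* Sketch (editorial; assumes the FreeRTOS build of multi_heap_platform.h, where the
   lock is a portMUX_TYPE spinlock):

       static portMUX_TYPE heap_spinlock = portMUX_INITIALIZER_UNLOCKED;
       multi_heap_set_lock(h, &heap_spinlock);
*/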
|
||||
|
||||
/** @brief Dump heap information to stdout
|
||||
*
|
||||
* For debugging purposes, this function dumps information about every block in the heap to stdout.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
*/
|
||||
void multi_heap_dump(multi_heap_handle_t heap);
|
||||
|
||||
/** @brief Check heap integrity
|
||||
*
|
||||
* Walks the heap and checks all heap data structures are valid. If any errors are detected, an error-specific message
|
||||
* can be optionally printed to stderr. Print behaviour can be overridden at compile time by defining
|
||||
* MULTI_CHECK_FAIL_PRINTF in multi_heap_platform.h.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param print_errors If true, errors will be printed to stderr.
|
||||
* @return true if heap is valid, false otherwise.
|
||||
*/
|
||||
bool multi_heap_check(multi_heap_handle_t heap, bool print_errors);
|
||||
|
||||
/** @brief Return free heap size
|
||||
*
|
||||
* Returns the number of bytes available in the heap.
|
||||
*
|
||||
* Equivalent to the total_free_bytes member returned by multi_heap_get_info().
|
||||
*
|
||||
* Note that the heap may be fragmented, so the actual maximum size for a single malloc() may be lower. To know this
|
||||
* size, see the largest_free_block member returned by multi_heap_get_heap_info().
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @return Number of free bytes.
|
||||
*/
|
||||
size_t multi_heap_free_size(multi_heap_handle_t heap);
|
||||
|
||||
/** @brief Return the lifetime minimum free heap size
|
||||
*
|
||||
* Equivalent to the minimum_free_bytes member returned by multi_heap_get_info().
|
||||
*
|
||||
* Returns the lifetime "low water mark" of possible values returned from multi_heap_free_size(), for the specified
|
||||
* heap.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @return Number of free bytes.
|
||||
*/
|
||||
size_t multi_heap_minimum_free_size(multi_heap_handle_t heap);
|
||||
|
||||
/** @brief Structure to access heap metadata via multi_heap_get_info */
|
||||
typedef struct {
|
||||
size_t total_free_bytes;              ///< Total free bytes in the heap. Equivalent to multi_heap_free_size().
|
||||
size_t total_allocated_bytes; ///< Total bytes allocated to data in the heap.
|
||||
size_t largest_free_block; ///< Size of largest free block in the heap. This is the largest malloc-able size.
|
||||
size_t minimum_free_bytes;            ///< Lifetime minimum free heap size. Equivalent to multi_heap_minimum_free_size().
|
||||
size_t allocated_blocks; ///< Number of (variable size) blocks allocated in the heap.
|
||||
size_t free_blocks; ///< Number of (variable size) free blocks in the heap.
|
||||
size_t total_blocks; ///< Total number of (variable size) blocks in the heap.
|
||||
} multi_heap_info_t;
|
||||
|
||||
/** @brief Return metadata about a given heap
|
||||
*
|
||||
* Fills a multi_heap_info_t structure with information about the specified heap.
|
||||
*
|
||||
* @param heap Handle to a registered heap.
|
||||
* @param info Pointer to a structure to fill with heap metadata.
|
||||
*/
|
||||
void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info);
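/* Usage sketch (editorial illustration):

       multi_heap_info_t info;
       multi_heap_get_info(h, &info);
       printf("free: %u bytes, largest free block: %u bytes\n",
              (unsigned) info.total_free_bytes,
              (unsigned) info.largest_free_block);
*/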
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,7 +0,0 @@
|
||||
[mapping:heap]
|
||||
archive: libheap.a
|
||||
entries:
|
||||
heap_tlsf (noflash)
|
||||
multi_heap (noflash)
|
||||
if HEAP_POISONING_DISABLED = n:
|
||||
multi_heap_poisoning (noflash)
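# Editorial note: the "noflash" placement keeps these objects in IRAM so heap functions
# remain usable while the flash cache is disabled (for example during flash writes).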
|
||||
@@ -1,376 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdbool.h>
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/cdefs.h>
|
||||
#include "heap_tlsf.h"
|
||||
#include <multi_heap.h>
|
||||
#include "multi_heap_internal.h"
|
||||
|
||||
/* Note: Keep platform-specific parts in this header, this source
|
||||
file should depend on libc only */
|
||||
#include "multi_heap_platform.h"
|
||||
|
||||
/* Defines compile-time configuration macros */
|
||||
#include "multi_heap_config.h"
|
||||
|
||||
#ifndef MULTI_HEAP_POISONING
|
||||
/* if no heap poisoning, public API aliases directly to these implementations */
|
||||
void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
|
||||
__attribute__((alias("multi_heap_malloc_impl")));
|
||||
|
||||
void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
|
||||
__attribute__((alias("multi_heap_aligned_alloc_impl")));
|
||||
|
||||
void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
|
||||
__attribute__((alias("multi_heap_free_impl")));
|
||||
|
||||
void multi_heap_free(multi_heap_handle_t heap, void *p)
|
||||
__attribute__((alias("multi_heap_free_impl")));
|
||||
|
||||
void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
|
||||
__attribute__((alias("multi_heap_realloc_impl")));
|
||||
|
||||
size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
|
||||
__attribute__((alias("multi_heap_get_allocated_size_impl")));
|
||||
|
||||
multi_heap_handle_t multi_heap_register(void *start, size_t size)
|
||||
__attribute__((alias("multi_heap_register_impl")));
|
||||
|
||||
void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
|
||||
__attribute__((alias("multi_heap_get_info_impl")));
|
||||
|
||||
size_t multi_heap_free_size(multi_heap_handle_t heap)
|
||||
__attribute__((alias("multi_heap_free_size_impl")));
|
||||
|
||||
size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
|
||||
__attribute__((alias("multi_heap_minimum_free_size_impl")));
|
||||
|
||||
void *multi_heap_get_block_address(multi_heap_block_handle_t block)
|
||||
__attribute__((alias("multi_heap_get_block_address_impl")));
|
||||
|
||||
void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#define ALIGN(X) ((X) & ~(sizeof(void *)-1))
|
||||
#define ALIGN_UP(X) ALIGN((X)+sizeof(void *)-1)
|
||||
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
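/* Illustration (editorial; assuming a 32-bit target where sizeof(void *) == 4):
   ALIGN_UP(13)       -> 16   rounded up to a pointer-size (4-byte) boundary
   ALIGN_UP_BY(13, 8) -> 16   rounded up to an 8-byte boundary */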
|
||||
|
||||
|
||||
typedef struct multi_heap_info {
|
||||
void *lock;
|
||||
size_t free_bytes;
|
||||
size_t minimum_free_bytes;
|
||||
size_t pool_size;
|
||||
tlsf_t heap_data;
|
||||
} heap_t;
|
||||
|
||||
/* Return true if this block is free. */
|
||||
static inline bool is_free(const block_header_t *block)
|
||||
{
|
||||
return ((block->size & 0x01) != 0);
|
||||
}
|
||||
|
||||
/* Data size of the block (excludes this block's header) */
|
||||
static inline size_t block_data_size(const block_header_t *block)
|
||||
{
|
||||
return (block->size & ~0x03);
|
||||
}
|
||||
|
||||
/* Check a block is valid for this heap. Used to verify parameters. */
|
||||
static void assert_valid_block(const heap_t *heap, const block_header_t *block)
|
||||
{
|
||||
pool_t pool = tlsf_get_pool(heap->heap_data);
|
||||
void *ptr = block_to_ptr(block);
|
||||
|
||||
MULTI_HEAP_ASSERT((ptr >= pool) &&
|
||||
(ptr < pool + heap->pool_size),
|
||||
(uintptr_t)ptr);
|
||||
}
|
||||
|
||||
void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block)
|
||||
{
|
||||
void *ptr = block_to_ptr(block);
|
||||
return (ptr);
|
||||
}
|
||||
|
||||
size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
|
||||
{
|
||||
return tlsf_block_size(p);
|
||||
}
|
||||
|
||||
multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
|
||||
{
|
||||
assert(start_ptr);
|
||||
if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) {
|
||||
//Region too small to be a heap.
|
||||
return NULL;
|
||||
}
|
||||
|
||||
heap_t *result = (heap_t *)start_ptr;
|
||||
size -= sizeof(heap_t);
|
||||
|
||||
result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0);
|
||||
if(!result->heap_data) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
result->lock = NULL;
|
||||
result->free_bytes = size - tlsf_size(result->heap_data);
|
||||
result->pool_size = size;
|
||||
result->minimum_free_bytes = result->free_bytes;
|
||||
return result;
|
||||
}
|
||||
|
||||
void multi_heap_set_lock(multi_heap_handle_t heap, void *lock)
|
||||
{
|
||||
heap->lock = lock;
|
||||
}
|
||||
|
||||
void inline multi_heap_internal_lock(multi_heap_handle_t heap)
|
||||
{
|
||||
MULTI_HEAP_LOCK(heap->lock);
|
||||
}
|
||||
|
||||
void inline multi_heap_internal_unlock(multi_heap_handle_t heap)
|
||||
{
|
||||
MULTI_HEAP_UNLOCK(heap->lock);
|
||||
}
|
||||
|
||||
multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap)
|
||||
{
|
||||
assert(heap != NULL);
|
||||
pool_t pool = tlsf_get_pool(heap->heap_data);
|
||||
block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);
|
||||
|
||||
return (multi_heap_block_handle_t)block;
|
||||
}
|
||||
|
||||
multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block)
|
||||
{
|
||||
assert(heap != NULL);
|
||||
assert_valid_block(heap, block);
|
||||
block_header_t* next = block_next(block);
|
||||
|
||||
if(block_data_size(next) == 0) {
|
||||
//Last block:
|
||||
return NULL;
|
||||
} else {
|
||||
return (multi_heap_block_handle_t)next;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
bool multi_heap_is_free(multi_heap_block_handle_t block)
|
||||
{
|
||||
return is_free(block);
|
||||
}
|
||||
|
||||
void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size)
|
||||
{
|
||||
if (size == 0 || heap == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
void *result = tlsf_malloc(heap->heap_data, size);
|
||||
if(result) {
|
||||
heap->free_bytes -= tlsf_block_size(result);
|
||||
if (heap->free_bytes < heap->minimum_free_bytes) {
|
||||
heap->minimum_free_bytes = heap->free_bytes;
|
||||
}
|
||||
}
|
||||
multi_heap_internal_unlock(heap);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void multi_heap_free_impl(multi_heap_handle_t heap, void *p)
|
||||
{
|
||||
|
||||
if (heap == NULL || p == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert_valid_block(heap, p);
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
heap->free_bytes += tlsf_block_size(p);
|
||||
tlsf_free(heap->heap_data, p);
|
||||
multi_heap_internal_unlock(heap);
|
||||
}
|
||||
|
||||
void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size)
|
||||
{
|
||||
assert(heap != NULL);
|
||||
|
||||
if (p == NULL) {
|
||||
return multi_heap_malloc_impl(heap, size);
|
||||
}
|
||||
|
||||
assert_valid_block(heap, p);
|
||||
|
||||
if (heap == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
size_t previous_block_size = tlsf_block_size(p);
|
||||
void *result = tlsf_realloc(heap->heap_data, p, size);
|
||||
if(result) {
|
||||
heap->free_bytes += previous_block_size;
|
||||
heap->free_bytes -= tlsf_block_size(result);
|
||||
if (heap->free_bytes < heap->minimum_free_bytes) {
|
||||
heap->minimum_free_bytes = heap->free_bytes;
|
||||
}
|
||||
}
|
||||
|
||||
multi_heap_internal_unlock(heap);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset)
|
||||
{
|
||||
if(heap == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if(!size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
//Alignment must be a power of two:
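//Illustration (editorial): 8 & 7 == 0, so 8 is accepted; 12 & 11 == 8 != 0, so 12 is rejected.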
|
||||
if(((alignment & (alignment - 1)) != 0) ||(!alignment)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
void *result = tlsf_memalign_offs(heap->heap_data, alignment, size, offset);
|
||||
if(result) {
|
||||
heap->free_bytes -= tlsf_block_size(result);
|
||||
if(heap->free_bytes < heap->minimum_free_bytes) {
|
||||
heap->minimum_free_bytes = heap->free_bytes;
|
||||
}
|
||||
}
|
||||
multi_heap_internal_unlock(heap);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment)
|
||||
{
|
||||
return multi_heap_aligned_alloc_impl_offs(heap, size, alignment, 0);
|
||||
}
|
||||
|
||||
bool multi_heap_check(multi_heap_handle_t heap, bool print_errors)
|
||||
{
|
||||
(void)print_errors;
|
||||
bool valid = true;
|
||||
assert(heap != NULL);
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
if(tlsf_check(heap->heap_data)) {
|
||||
valid = false;
|
||||
}
|
||||
|
||||
if(tlsf_check_pool(tlsf_get_pool(heap->heap_data))) {
|
||||
valid = false;
|
||||
}
|
||||
|
||||
multi_heap_internal_unlock(heap);
|
||||
return valid;
|
||||
}
|
||||
|
||||
static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
|
||||
{
|
||||
(void)user;
|
||||
MULTI_HEAP_STDERR_PRINTF("Block %p data, size: %d bytes, Free: %s \n",
|
||||
(void *)ptr,
|
||||
size,
|
||||
used ? "No" : "Yes");
|
||||
}
|
||||
|
||||
void multi_heap_dump(multi_heap_handle_t heap)
|
||||
{
|
||||
assert(heap != NULL);
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
MULTI_HEAP_STDERR_PRINTF("Showing data for heap: %p \n", (void *)heap);
|
||||
tlsf_walk_pool(tlsf_get_pool(heap->heap_data), multi_heap_dump_tlsf, NULL);
|
||||
multi_heap_internal_unlock(heap);
|
||||
}
|
||||
|
||||
size_t multi_heap_free_size_impl(multi_heap_handle_t heap)
|
||||
{
|
||||
if (heap == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return heap->free_bytes;
|
||||
}
|
||||
|
||||
size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap)
|
||||
{
|
||||
if (heap == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return heap->minimum_free_bytes;
|
||||
}
|
||||
|
||||
static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
|
||||
{
|
||||
multi_heap_info_t *info = user;
|
||||
|
||||
if(used) {
|
||||
info->allocated_blocks++;
|
||||
} else {
|
||||
info->free_blocks++;
|
||||
|
||||
if(size > info->largest_free_block ) {
|
||||
info->largest_free_block = size;
|
||||
}
|
||||
}
|
||||
|
||||
info->total_blocks++;
|
||||
}
|
||||
|
||||
void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
|
||||
{
|
||||
memset(info, 0, sizeof(multi_heap_info_t));
|
||||
|
||||
if (heap == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
tlsf_walk_pool(tlsf_get_pool(heap->heap_data), multi_heap_get_info_tlsf, info);
|
||||
info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes;
|
||||
info->minimum_free_bytes = heap->minimum_free_bytes;
|
||||
info->total_free_bytes = heap->free_bytes;
|
||||
info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
|
||||
multi_heap_internal_unlock(heap);
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#ifdef ESP_PLATFORM
|
||||
#include "sdkconfig.h"
|
||||
#include "soc/soc.h"
|
||||
#include "soc/soc_caps.h"
|
||||
#endif
|
||||
|
||||
/* Configuration macros for multi-heap */
|
||||
|
||||
#ifdef CONFIG_HEAP_POISONING_LIGHT
|
||||
#define MULTI_HEAP_POISONING
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HEAP_POISONING_COMPREHENSIVE
|
||||
#define MULTI_HEAP_POISONING
|
||||
#define MULTI_HEAP_POISONING_SLOW
|
||||
#endif
|
||||
@@ -1,76 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
/* Opaque handle to a heap block */
|
||||
typedef const struct block_header_t *multi_heap_block_handle_t;
|
||||
|
||||
/* Internal definitions for the "implementation" of the multi_heap API,
|
||||
as defined in multi_heap.c.
|
||||
|
||||
If heap poisoning is disabled, these are aliased directly to the public API.
|
||||
|
||||
If heap poisoning is enabled, wrapper functions call each of these.
|
||||
*/
|
||||
|
||||
void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size);
|
||||
|
||||
/* Allocate a memory region of minimum `size` bytes, aligned on `alignment`. */
|
||||
void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment);
|
||||
|
||||
/* Allocate a memory region of minimum `size` bytes, such that the address at `offset` bytes into the allocation is aligned on `alignment`. */
|
||||
void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset);
|
||||
|
||||
void multi_heap_free_impl(multi_heap_handle_t heap, void *p);
|
||||
void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size);
|
||||
multi_heap_handle_t multi_heap_register_impl(void *start, size_t size);
|
||||
void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info);
|
||||
size_t multi_heap_free_size_impl(multi_heap_handle_t heap);
|
||||
size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap);
|
||||
size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p);
|
||||
void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block);
|
||||
|
||||
/* Some internal functions for heap poisoning use */
|
||||
|
||||
/* Check an allocated block's poison bytes are correct. Called by multi_heap_check(). */
|
||||
bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors);
|
||||
|
||||
/* Fill a region of memory with the free or malloced pattern.
|
||||
Called when merging blocks, to overwrite the old block header.
|
||||
*/
|
||||
void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free);
|
||||
|
||||
/* Allow heap poisoning to lock/unlock the heap to avoid race conditions
|
||||
if multi_heap_check() is running concurrently.
|
||||
*/
|
||||
void multi_heap_internal_lock(multi_heap_handle_t heap);
|
||||
|
||||
void multi_heap_internal_unlock(multi_heap_handle_t heap);
|
||||
|
||||
/* Some internal functions for heap debugging code to use */
|
||||
|
||||
/* Get the handle to the first (fixed free) block in a heap */
|
||||
multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap);
|
||||
|
||||
/* Get the handle to the next block in a heap, with validation */
|
||||
multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block);
|
||||
|
||||
/* Test if a heap block is free */
|
||||
bool multi_heap_is_free(const multi_heap_block_handle_t block);
|
||||
|
||||
/* Get the data address of a heap block */
|
||||
void *multi_heap_get_block_address(multi_heap_block_handle_t block);
|
||||
|
||||
/* Get the owner identification for a heap block */
|
||||
void *multi_heap_get_block_owner(multi_heap_block_handle_t block);
|
||||
@@ -1,108 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#pragma once
|
||||
|
||||
#ifdef MULTI_HEAP_FREERTOS
|
||||
|
||||
#include "freertos/FreeRTOS.h"
|
||||
|
||||
#include "sdkconfig.h"
|
||||
#include "esp_rom_sys.h"
|
||||
#if CONFIG_IDF_TARGET_ESP32
|
||||
#include "esp32/rom/ets_sys.h" // will be removed in idf v5.0
|
||||
#elif CONFIG_IDF_TARGET_ESP32S2
|
||||
#include "esp32s2/rom/ets_sys.h"
|
||||
#endif
|
||||
#include <assert.h>
|
||||
|
||||
typedef portMUX_TYPE multi_heap_lock_t;
|
||||
|
||||
/* Because malloc/free can happen inside an ISR context,
|
||||
we need to use portmux spinlocks here not RTOS mutexes */
|
||||
#define MULTI_HEAP_LOCK(PLOCK) do { \
|
||||
if((PLOCK) != NULL) { \
|
||||
portENTER_CRITICAL((PLOCK)); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
|
||||
#define MULTI_HEAP_UNLOCK(PLOCK) do { \
|
||||
if ((PLOCK) != NULL) { \
|
||||
portEXIT_CRITICAL((PLOCK)); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define MULTI_HEAP_LOCK_INIT(PLOCK) do { \
|
||||
vPortCPUInitializeMutex((PLOCK)); \
|
||||
} while(0)
|
||||
|
||||
#define MULTI_HEAP_LOCK_STATIC_INITIALIZER portMUX_INITIALIZER_UNLOCKED
|
||||
|
||||
/* Not safe to use std i/o while in a portmux critical section,
|
||||
can deadlock, so we use the ROM equivalent functions. */
|
||||
|
||||
#define MULTI_HEAP_PRINTF esp_rom_printf
|
||||
#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) esp_rom_printf(MSG, __VA_ARGS__)
|
||||
|
||||
inline static void multi_heap_assert(bool condition, const char *format, int line, intptr_t address)
|
||||
{
|
||||
/* Can't use libc assert() here as it calls printf() which can cause another malloc() for a newlib lock.
|
||||
|
||||
Also, it's useful to be able to print the memory address where corruption was detected.
|
||||
*/
|
||||
#ifndef NDEBUG
|
||||
if(!condition) {
|
||||
#ifndef CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
|
||||
esp_rom_printf(format, line, address);
|
||||
#endif // CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
|
||||
abort();
|
||||
}
|
||||
#else // NDEBUG
|
||||
(void) condition;
|
||||
#endif // NDEBUG
|
||||
}
|
||||
|
||||
#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) \
|
||||
multi_heap_assert((CONDITION), "CORRUPT HEAP: multi_heap.c:%d detected at 0x%08x\n", \
|
||||
__LINE__, (intptr_t)(ADDRESS))
|
||||
|
||||
#ifdef CONFIG_HEAP_TASK_TRACKING
|
||||
#include <freertos/task.h>
|
||||
#define MULTI_HEAP_BLOCK_OWNER TaskHandle_t task;
|
||||
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) (HEAD)->task = xTaskGetCurrentTaskHandle()
|
||||
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) ((HEAD)->task)
|
||||
#else
|
||||
#define MULTI_HEAP_BLOCK_OWNER
|
||||
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
|
||||
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
|
||||
#endif
|
||||
|
||||
#else // MULTI_HEAP_FREERTOS
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#define MULTI_HEAP_PRINTF printf
|
||||
#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) fprintf(stderr, MSG, __VA_ARGS__)
|
||||
#define MULTI_HEAP_LOCK(PLOCK) (void) (PLOCK)
|
||||
#define MULTI_HEAP_UNLOCK(PLOCK) (void) (PLOCK)
|
||||
#define MULTI_HEAP_LOCK_INIT(PLOCK) (void) (PLOCK)
|
||||
#define MULTI_HEAP_LOCK_STATIC_INITIALIZER 0
|
||||
|
||||
#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) assert((CONDITION) && "Heap corrupt")
|
||||
|
||||
#define MULTI_HEAP_BLOCK_OWNER
|
||||
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
|
||||
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
|
||||
|
||||
#endif // MULTI_HEAP_FREERTOS
|
||||
@@ -1,426 +0,0 @@
|
||||
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdbool.h>
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <sys/param.h>
|
||||
#include <multi_heap.h>
|
||||
#include "multi_heap_internal.h"
|
||||
|
||||
/* Note: Keep platform-specific parts in this header, this source
|
||||
file should depend on libc only */
|
||||
#include "multi_heap_platform.h"
|
||||
|
||||
/* Defines compile-time configuration macros */
|
||||
#include "multi_heap_config.h"
|
||||
|
||||
#ifdef MULTI_HEAP_POISONING
|
||||
|
||||
/* Alias MULTI_HEAP_POISONING_SLOW to SLOW for better readability */
|
||||
#ifdef SLOW
|
||||
#error "external header has defined SLOW"
|
||||
#endif
|
||||
#ifdef MULTI_HEAP_POISONING_SLOW
|
||||
#define SLOW 1
|
||||
#endif
|
||||
|
||||
#define MALLOC_FILL_PATTERN 0xce
|
||||
#define FREE_FILL_PATTERN 0xfe
|
||||
|
||||
#define HEAD_CANARY_PATTERN 0xABBA1234
|
||||
#define TAIL_CANARY_PATTERN 0xBAAD5678
|
||||
|
||||
|
||||
#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
|
||||
|
||||
typedef struct {
|
||||
uint32_t head_canary;
|
||||
MULTI_HEAP_BLOCK_OWNER
|
||||
size_t alloc_size;
|
||||
} poison_head_t;
|
||||
|
||||
typedef struct {
|
||||
uint32_t tail_canary;
|
||||
} poison_tail_t;
|
||||
|
||||
#define POISON_OVERHEAD (sizeof(poison_head_t) + sizeof(poison_tail_t))
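/* Layout sketch of a poisoned allocation (editorial illustration):

       [ poison_head_t | user data: alloc_size bytes | poison_tail_t ]
         head_canary = 0xABBA1234                       tail_canary = 0xBAAD5678

   POISON_OVERHEAD is the combined size of the head and tail structures. */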
|
||||
|
||||
/* Given a "poisoned" region with pre-data header 'head', and actual data size 'alloc_size', fill in the head and tail
|
||||
region checks.
|
||||
|
||||
Returns the pointer to the actual usable data buffer (ie after 'head')
|
||||
*/
|
||||
static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
|
||||
{
|
||||
uint8_t *data = (uint8_t *)(&head[1]); /* start of data ie 'real' allocated buffer */
|
||||
poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
|
||||
head->alloc_size = alloc_size;
|
||||
head->head_canary = HEAD_CANARY_PATTERN;
|
||||
MULTI_HEAP_SET_BLOCK_OWNER(head);
|
||||
|
||||
uint32_t tail_canary = TAIL_CANARY_PATTERN;
|
||||
if ((intptr_t)tail % sizeof(void *) == 0) {
|
||||
tail->tail_canary = tail_canary;
|
||||
} else {
|
||||
/* unaligned tail_canary */
|
||||
memcpy(&tail->tail_canary, &tail_canary, sizeof(uint32_t));
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
/* Given a pointer to some allocated data, check the head & tail poison structures (before & after it) that were
|
||||
previously injected by poison_allocated_region().
|
||||
|
||||
Returns a pointer to the poison header structure, or NULL if the poison structures are corrupt.
|
||||
*/
|
||||
static poison_head_t *verify_allocated_region(void *data, bool print_errors)
|
||||
{
|
||||
poison_head_t *head = (poison_head_t *)((intptr_t)data - sizeof(poison_head_t));
|
||||
poison_tail_t *tail = (poison_tail_t *)((intptr_t)data + head->alloc_size);
|
||||
|
||||
/* check if the beginning of the data was overwritten */
|
||||
if (head->head_canary != HEAD_CANARY_PATTERN) {
|
||||
if (print_errors) {
|
||||
MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad head at %p. Expected 0x%08x got 0x%08x\n", &head->head_canary,
|
||||
HEAD_CANARY_PATTERN, head->head_canary);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* check if the end of the data was overrun */
|
||||
uint32_t canary;
|
||||
if ((intptr_t)tail % sizeof(void *) == 0) {
|
||||
canary = tail->tail_canary;
|
||||
} else {
|
||||
/* tail is unaligned */
|
||||
memcpy(&canary, &tail->tail_canary, sizeof(canary));
|
||||
}
|
||||
if (canary != TAIL_CANARY_PATTERN) {
|
||||
if (print_errors) {
|
||||
MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Bad tail at %p. Expected 0x%08x got 0x%08x\n", &tail->tail_canary,
|
||||
TAIL_CANARY_PATTERN, canary);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return head;
|
||||
}
|
||||
|
||||
#ifdef SLOW
|
||||
/* Go through a region that should have the specified fill byte 'pattern',
|
||||
verify it.
|
||||
|
||||
if expect_free is true, expect FREE_FILL_PATTERN otherwise MALLOC_FILL_PATTERN.
|
||||
|
||||
if swap_pattern is true, swap patterns in the buffer (ie replace MALLOC_FILL_PATTERN with FREE_FILL_PATTERN, and vice versa.)
|
||||
|
||||
Returns true if verification checks out.
|
||||
*/
|
||||
static bool verify_fill_pattern(void *data, size_t size, bool print_errors, bool expect_free, bool swap_pattern)
|
||||
{
|
||||
const uint32_t FREE_FILL_WORD = (FREE_FILL_PATTERN << 24) | (FREE_FILL_PATTERN << 16) | (FREE_FILL_PATTERN << 8) | FREE_FILL_PATTERN;
|
||||
const uint32_t MALLOC_FILL_WORD = (MALLOC_FILL_PATTERN << 24) | (MALLOC_FILL_PATTERN << 16) | (MALLOC_FILL_PATTERN << 8) | MALLOC_FILL_PATTERN;
|
||||
|
||||
const uint32_t EXPECT_WORD = expect_free ? FREE_FILL_WORD : MALLOC_FILL_WORD;
|
||||
const uint32_t REPLACE_WORD = expect_free ? MALLOC_FILL_WORD : FREE_FILL_WORD;
|
||||
bool valid = true;
|
||||
|
||||
/* Use 4-byte operations as much as possible */
|
||||
if ((intptr_t)data % 4 == 0) {
|
||||
uint32_t *p = data;
|
||||
while (size >= 4) {
|
||||
if (*p != EXPECT_WORD) {
|
||||
if (print_errors) {
|
||||
MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%08x got 0x%08x\n", p, EXPECT_WORD, *p);
|
||||
}
|
||||
valid = false;
|
||||
#ifndef NDEBUG
|
||||
/* If an assertion is going to fail as soon as we're done verifying the pattern, leave the rest of the
|
||||
buffer contents as-is for better post-mortem analysis
|
||||
*/
|
||||
swap_pattern = false;
|
||||
#endif
|
||||
}
|
||||
if (swap_pattern) {
|
||||
*p = REPLACE_WORD;
|
||||
}
|
||||
p++;
|
||||
size -= 4;
|
||||
}
|
||||
data = p;
|
||||
}
|
||||
|
||||
uint8_t *p = data;
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
if (p[i] != (uint8_t)EXPECT_WORD) {
|
||||
if (print_errors) {
|
||||
MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Invalid data at %p. Expected 0x%02x got 0x%02x\n", p, (uint8_t)EXPECT_WORD, *p);
|
||||
}
|
||||
valid = false;
|
||||
#ifndef NDEBUG
|
||||
swap_pattern = false; // same as above
|
||||
#endif
|
||||
}
|
||||
if (swap_pattern) {
|
||||
p[i] = (uint8_t)REPLACE_WORD;
|
||||
}
|
||||
}
|
||||
return valid;
|
||||
}
|
||||
#endif
|
||||
|
||||
void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t alignment)
|
||||
{
|
||||
if (!size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (size > SIZE_MAX - POISON_OVERHEAD) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
poison_head_t *head = multi_heap_aligned_alloc_impl_offs(heap, size + POISON_OVERHEAD,
|
||||
alignment, sizeof(poison_head_t));
|
||||
uint8_t *data = NULL;
|
||||
if (head != NULL) {
|
||||
data = poison_allocated_region(head, size);
|
||||
#ifdef SLOW
|
||||
/* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
|
||||
bool ret = verify_fill_pattern(data, size, true, true, true);
|
||||
assert( ret );
|
||||
#endif
|
||||
} else {
|
||||
multi_heap_internal_unlock(heap);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
multi_heap_internal_unlock(heap);
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
|
||||
{
|
||||
if (!size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if(size > SIZE_MAX - POISON_OVERHEAD) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
poison_head_t *head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
|
||||
uint8_t *data = NULL;
|
||||
if (head != NULL) {
|
||||
data = poison_allocated_region(head, size);
|
||||
#ifdef SLOW
|
||||
/* check everything we got back is FREE_FILL_PATTERN & swap for MALLOC_FILL_PATTERN */
|
||||
bool ret = verify_fill_pattern(data, size, true, true, true);
|
||||
assert( ret );
|
||||
#endif
|
||||
}
|
||||
|
||||
multi_heap_internal_unlock(heap);
|
||||
return data;
|
||||
}
|
||||
|
||||
void multi_heap_free(multi_heap_handle_t heap, void *p)
|
||||
{
|
||||
if (p == NULL) {
|
||||
return;
|
||||
}
|
||||
multi_heap_internal_lock(heap);
|
||||
|
||||
poison_head_t *head = verify_allocated_region(p, true);
|
||||
assert(head != NULL);
|
||||
|
||||
#ifdef SLOW
|
||||
/* replace everything with FREE_FILL_PATTERN, including the poison head/tail */
|
||||
memset(head, FREE_FILL_PATTERN,
|
||||
head->alloc_size + POISON_OVERHEAD);
|
||||
#endif
|
||||
multi_heap_free_impl(heap, head);
|
||||
|
||||
multi_heap_internal_unlock(heap);
|
||||
}
|
||||
|
||||
void multi_heap_aligned_free(multi_heap_handle_t heap, void *p)
|
||||
{
|
||||
multi_heap_free(heap, p);
|
||||
}
|
||||
|
||||
void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
|
||||
{
|
||||
poison_head_t *head = NULL;
|
||||
poison_head_t *new_head;
|
||||
void *result = NULL;
|
||||
|
||||
if(size > SIZE_MAX - POISON_OVERHEAD) {
|
||||
return NULL;
|
||||
}
|
||||
if (p == NULL) {
|
||||
return multi_heap_malloc(heap, size);
|
||||
}
|
||||
if (size == 0) {
|
||||
multi_heap_free(heap, p);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* p != NULL, size != 0 */
|
||||
head = verify_allocated_region(p, true);
|
||||
assert(head != NULL);
|
||||
|
||||
multi_heap_internal_lock(heap);
|
||||
|
||||
#ifndef SLOW
|
||||
new_head = multi_heap_realloc_impl(heap, head, size + POISON_OVERHEAD);
|
||||
if (new_head != NULL) {
|
||||
/* For "fast" poisoning, we only overwrite the head/tail of the new block so it's safe
|
||||
to do this even if realloc resized the buffer in place.
|
||||
*/
|
||||
result = poison_allocated_region(new_head, size);
|
||||
}
|
||||
#else // SLOW
|
||||
/* When slow poisoning is enabled, it becomes very fiddly to try and correctly fill memory when resizing in place
|
||||
(where the buffer may be moved (including to an overlapping address with the old buffer), grown, or shrunk in
|
||||
place.)
|
||||
|
||||
For now we just malloc a new buffer, copy, and free. :|
|
||||
|
||||
Note: If this ever changes, multi_heap defrag realloc test should be enabled.
|
||||
*/
|
||||
size_t orig_alloc_size = head->alloc_size;
|
||||
|
||||
new_head = multi_heap_malloc_impl(heap, size + POISON_OVERHEAD);
|
||||
if (new_head != NULL) {
|
||||
result = poison_allocated_region(new_head, size);
|
||||
memcpy(result, p, MIN(size, orig_alloc_size));
|
||||
multi_heap_free(heap, p);
|
||||
}
|
||||
#endif
|
||||
|
||||
multi_heap_internal_unlock(heap);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void *multi_heap_get_block_address(multi_heap_block_handle_t block)
|
||||
{
|
||||
char *head = multi_heap_get_block_address_impl(block);
|
||||
return head + sizeof(poison_head_t);
|
||||
}
|
||||
|
||||
void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
|
||||
{
|
||||
return MULTI_HEAP_GET_BLOCK_OWNER((poison_head_t*)multi_heap_get_block_address_impl(block));
|
||||
}
|
||||
|
||||
multi_heap_handle_t multi_heap_register(void *start, size_t size)
|
||||
{
|
||||
#ifdef SLOW
|
||||
if (start != NULL) {
|
||||
memset(start, FREE_FILL_PATTERN, size);
|
||||
}
|
||||
#endif
|
||||
return multi_heap_register_impl(start, size);
|
||||
}
|
||||
|
||||
static inline void subtract_poison_overhead(size_t *arg) {
|
||||
if (*arg > POISON_OVERHEAD) {
|
||||
*arg -= POISON_OVERHEAD;
|
||||
} else {
|
||||
*arg = 0;
|
||||
}
|
||||
}
|
||||
|
||||
size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
|
||||
{
|
||||
poison_head_t *head = verify_allocated_region(p, true);
|
||||
assert(head != NULL);
|
||||
size_t result = multi_heap_get_allocated_size_impl(heap, head);
|
||||
return result;
|
||||
}
|
||||
|
||||
void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info)
|
||||
{
|
||||
multi_heap_get_info_impl(heap, info);
|
||||
/* don't count the heap poison head & tail overhead in the allocated bytes size */
|
||||
info->total_allocated_bytes -= info->allocated_blocks * POISON_OVERHEAD;
|
||||
/* trim largest_free_block to account for poison overhead */
|
||||
subtract_poison_overhead(&info->largest_free_block);
|
||||
/* similarly, trim total_free_bytes so there's no suggestion that
|
||||
a block this big may be available. */
|
||||
subtract_poison_overhead(&info->total_free_bytes);
|
||||
subtract_poison_overhead(&info->minimum_free_bytes);
|
||||
}
|
||||
|
||||
size_t multi_heap_free_size(multi_heap_handle_t heap)
|
||||
{
|
||||
size_t r = multi_heap_free_size_impl(heap);
|
||||
subtract_poison_overhead(&r);
|
||||
return r;
|
||||
}
|
||||
|
||||
size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
|
||||
{
|
||||
size_t r = multi_heap_minimum_free_size_impl(heap);
|
||||
subtract_poison_overhead(&r);
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Internal hooks used by multi_heap to manage poisoning, while keeping some modularity */
|
||||
|
||||
bool multi_heap_internal_check_block_poisoning(void *start, size_t size, bool is_free, bool print_errors)
|
||||
{
|
||||
if (is_free) {
|
||||
#ifdef SLOW
|
||||
return verify_fill_pattern(start, size, print_errors, true, false);
|
||||
#else
|
||||
return true; /* can only verify empty blocks in SLOW mode */
|
||||
#endif
|
||||
} else {
|
||||
void *data = (void *)((intptr_t)start + sizeof(poison_head_t));
|
||||
poison_head_t *head = verify_allocated_region(data, print_errors);
|
||||
if (head != NULL && head->alloc_size > size - POISON_OVERHEAD) {
|
||||
/* block can be bigger than alloc_size, for reasons of alignment & fragmentation,
|
||||
but block can never be smaller than head->alloc_size... */
|
||||
if (print_errors) {
|
||||
MULTI_HEAP_STDERR_PRINTF("CORRUPT HEAP: Size at %p expected <=0x%08x got 0x%08x\n", &head->alloc_size,
|
||||
size - POISON_OVERHEAD, head->alloc_size);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return head != NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_free)
|
||||
{
|
||||
memset(start, is_free ? FREE_FILL_PATTERN : MALLOC_FILL_PATTERN, size);
|
||||
}
|
||||
|
||||
#else // !MULTI_HEAP_POISONING
|
||||
|
||||
#ifdef MULTI_HEAP_POISONING_SLOW
|
||||
#error "MULTI_HEAP_POISONING_SLOW requires MULTI_HEAP_POISONING"
|
||||
#endif
|
||||
|
||||
#endif // MULTI_HEAP_POISONING
|
||||
@@ -1,3 +0,0 @@
|
||||
idf_component_register(SRC_DIRS "."
|
||||
PRIV_INCLUDE_DIRS "."
|
||||
PRIV_REQUIRES cmock test_utils heap)
|
||||
@@ -1,5 +0,0 @@
|
||||
#
|
||||
#Component Makefile
|
||||
#
|
||||
|
||||
COMPONENT_ADD_LDFLAGS = -Wl,--whole-archive -l$(COMPONENT_NAME) -Wl,--no-whole-archive
|
||||
@@ -1,147 +0,0 @@
|
||||
/*
|
||||
Tests for the capabilities-based memory allocator.
|
||||
*/
|
||||
|
||||
#include <esp_types.h>
|
||||
#include <stdio.h>
|
||||
#include "unity.h"
|
||||
#include "esp_attr.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "esp_spi_flash.h"
|
||||
#include <stdlib.h>
|
||||
#include <sys/param.h>
|
||||
#include <string.h>
|
||||
#include <malloc.h>
|
||||
|
||||
TEST_CASE("Capabilities aligned allocator test", "[heap]")
|
||||
{
|
||||
uint32_t alignments = 0;
|
||||
|
||||
printf("[ALIGNED_ALLOC] Allocating from default CAP: \n");
|
||||
|
||||
for(;alignments <= 1024; alignments++) {
|
||||
uint8_t *buf = (uint8_t *)memalign(alignments, (alignments + 137));
|
||||
if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
|
||||
TEST_ASSERT( buf == NULL );
|
||||
//printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
|
||||
} else {
|
||||
TEST_ASSERT( buf != NULL );
|
||||
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
|
||||
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
|
||||
//Address of obtained block must be aligned with selected value
|
||||
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
|
||||
|
||||
//Write some data, if it corrupts memory probably the heap
|
||||
//canary verification will fail:
|
||||
memset(buf, 0xA5, (alignments + 137));
|
||||
|
||||
free(buf);
|
||||
}
|
||||
}
|
||||
|
||||
//Alloc from a non permitted area:
|
||||
uint32_t *not_permitted_buf = (uint32_t *)heap_caps_aligned_alloc(alignments, (alignments + 137), MALLOC_CAP_EXEC | MALLOC_CAP_32BIT);
|
||||
TEST_ASSERT( not_permitted_buf == NULL );
|
||||
|
||||
#if CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT
|
||||
alignments = 0;
|
||||
printf("[ALIGNED_ALLOC] Allocating from external memory: \n");
|
||||
|
||||
for(;alignments <= 1024 * 1024; alignments++) {
|
||||
//Now try to take aligned memory from external SPIRAM:
|
||||
uint8_t *buf = (uint8_t *)heap_caps_aligned_alloc(alignments, 10*1024, MALLOC_CAP_SPIRAM);
|
||||
if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
|
||||
TEST_ASSERT( buf == NULL );
|
||||
//printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
|
||||
} else {
|
||||
TEST_ASSERT( buf != NULL );
|
||||
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
|
||||
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
|
||||
//Address of obtained block must be aligned with selected value
|
||||
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
|
||||
|
||||
//Write some data, if it corrupts memory probably the heap
|
||||
//canary verification will fail:
|
||||
memset(buf, 0xA5, (10*1024));
|
||||
heap_caps_free(buf);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
TEST_CASE("Capabilities aligned calloc test", "[heap]")
|
||||
{
|
||||
uint32_t alignments = 0;
|
||||
|
||||
printf("[ALIGNED_ALLOC] Allocating from default CAP: \n");
|
||||
|
||||
for(;alignments <= 1024; alignments++) {
|
||||
uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(alignments, 1, (alignments + 137), MALLOC_CAP_DEFAULT);
|
||||
if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
|
||||
TEST_ASSERT( buf == NULL );
|
||||
//printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
|
||||
} else {
|
||||
TEST_ASSERT( buf != NULL );
|
||||
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
|
||||
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
|
||||
//Address of obtained block must be aligned with selected value
|
||||
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
|
||||
|
||||
//Write some data, if it corrupts memory probably the heap
|
||||
//canary verification will fail:
|
||||
memset(buf, 0xA5, (alignments + 137));
|
||||
|
||||
heap_caps_free(buf);
|
||||
}
|
||||
}
|
||||
|
||||
//Check if memory is initialized with zero:
|
||||
uint8_t byte_array[1024];
|
||||
memset(&byte_array, 0, sizeof(byte_array));
|
||||
uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(1024, 1, 1024, MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(memcmp(byte_array, buf, sizeof(byte_array)) == 0);
|
||||
heap_caps_free(buf);
|
||||
|
||||
//Same size, but different chunk:
|
||||
buf = (uint8_t *)heap_caps_aligned_calloc(1024, 1024, 1, MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(memcmp(byte_array, buf, sizeof(byte_array)) == 0);
|
||||
heap_caps_free(buf);
|
||||
|
||||
//Alloc from a non permitted area:
|
||||
uint32_t *not_permitted_buf = (uint32_t *)heap_caps_aligned_calloc(alignments, 1, (alignments + 137), MALLOC_CAP_32BIT);
|
||||
TEST_ASSERT( not_permitted_buf == NULL );
|
||||
|
||||
#if CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT
|
||||
alignments = 0;
|
||||
printf("[ALIGNED_ALLOC] Allocating from external memory: \n");
|
||||
|
||||
for(;alignments <= 1024 * 1024; alignments++) {
|
||||
//Now try to take aligned memory from IRAM:
|
||||
uint8_t *buf = (uint8_t *)heap_caps_aligned_calloc(alignments, 1, 10*1024, MALLOC_CAP_SPIRAM);
|
||||
if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
|
||||
TEST_ASSERT( buf == NULL );
|
||||
//printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
|
||||
} else {
|
||||
TEST_ASSERT( buf != NULL );
|
||||
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
|
||||
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
|
||||
//Address of obtained block must be aligned with selected value
|
||||
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
|
||||
|
||||
//Write some data, if it corrupts memory probably the heap
|
||||
//canary verification will fail:
|
||||
memset(buf, 0xA5, (10*1024));
|
||||
heap_caps_free(buf);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
TEST_CASE("aligned_alloc(0) should return a NULL pointer", "[heap]")
|
||||
{
|
||||
void *p;
|
||||
p = heap_caps_aligned_alloc(32, 0, MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(p == NULL);
|
||||
}
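
For reference, the alignment assertions repeated throughout these tests rest on two standard bit tricks: a non-zero value is a power of two exactly when (x & (x - 1)) == 0, and an address is aligned to a power-of-two boundary exactly when its low bits under (align - 1) are all zero. A minimal standalone sketch with hypothetical helper names (not taken from the test code):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helpers illustrating the checks used by the tests above. */
static bool is_power_of_two(uint32_t x)
{
    return (x != 0) && ((x & (x - 1)) == 0);   /* exactly one bit set */
}

static bool is_aligned_to(const void *p, uint32_t align)
{
    /* only meaningful when 'align' is a power of two */
    return ((uintptr_t)p & ((uintptr_t)align - 1)) == 0;
}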
|
||||
@@ -1,108 +0,0 @@
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include <esp_types.h>
|
||||
#include <stdio.h>
|
||||
#include "unity.h"
|
||||
#include "esp_attr.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include <stdlib.h>
|
||||
#include <sys/param.h>
|
||||
#include <string.h>
|
||||
#include <test_utils.h>
|
||||
|
||||
//This test only makes sense with poisoning disabled (neither light nor comprehensive enabled)
|
||||
#if !defined(CONFIG_HEAP_POISONING_COMPREHENSIVE) && !defined(CONFIG_HEAP_POISONING_LIGHT)
|
||||
|
||||
#define NUM_POINTERS 128
|
||||
#define ITERATIONS 10000
|
||||
|
||||
TEST_CASE("Heap many random allocations timings", "[heap]")
|
||||
{
|
||||
void *p[NUM_POINTERS] = { 0 };
|
||||
size_t s[NUM_POINTERS] = { 0 };
|
||||
|
||||
uint32_t cycles_before;
|
||||
uint64_t alloc_time_average = 0;
|
||||
uint64_t free_time_average = 0;
|
||||
uint64_t realloc_time_average = 0;
|
||||
|
||||
for (int i = 0; i < ITERATIONS; i++) {
|
||||
uint8_t n = esp_random() % NUM_POINTERS;
|
||||
|
||||
if (esp_random() % 4 == 0) {
|
||||
/* 1 in 4 iterations, try to realloc the buffer instead
|
||||
of using malloc/free
|
||||
*/
|
||||
size_t new_size = esp_random() % 1024;
|
||||
|
||||
cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
|
||||
void *new_p = heap_caps_realloc(p[n], new_size, MALLOC_CAP_DEFAULT);
|
||||
realloc_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
|
||||
|
||||
printf("realloc %p -> %p (%zu -> %zu) time spent cycles: %lld \n", p[n], new_p, s[n], new_size, realloc_time_average);
|
||||
heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true);
|
||||
if (new_size == 0 || new_p != NULL) {
|
||||
p[n] = new_p;
|
||||
s[n] = new_size;
|
||||
if (new_size > 0) {
|
||||
memset(p[n], n, new_size);
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (p[n] != NULL) {
|
||||
if (s[n] > 0) {
|
||||
/* Verify pre-existing contents of p[n] */
|
||||
uint8_t compare[s[n]];
|
||||
memset(compare, n, s[n]);
|
||||
TEST_ASSERT(( memcmp(compare, p[n], s[n]) == 0 ));
|
||||
}
|
||||
TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true));
|
||||
|
||||
cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
|
||||
heap_caps_free(p[n]);
|
||||
free_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
|
||||
|
||||
printf("freed %p (%zu) time spent cycles: %lld\n", p[n], s[n], free_time_average);
|
||||
|
||||
if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
|
||||
printf("FAILED iteration %d after freeing %p\n", i, p[n]);
|
||||
heap_caps_dump(MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
s[n] = rand() % 1024;
|
||||
heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true);
|
||||
cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
|
||||
p[n] = heap_caps_malloc(s[n], MALLOC_CAP_DEFAULT);
|
||||
alloc_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
|
||||
|
||||
printf("malloc %p (%zu) time spent cycles: %lld \n", p[n], s[n], alloc_time_average);
|
||||
|
||||
if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
|
||||
printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
|
||||
heap_caps_dump(MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(0);
|
||||
}
|
||||
|
||||
if (p[n] != NULL) {
|
||||
memset(p[n], n, s[n]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < NUM_POINTERS; i++) {
|
||||
cycles_before = portGET_RUN_TIME_COUNTER_VALUE();
|
||||
heap_caps_free( p[i]);
|
||||
free_time_average = portGET_RUN_TIME_COUNTER_VALUE() - cycles_before;
|
||||
|
||||
if (!heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true)) {
|
||||
printf("FAILED during cleanup after freeing %p\n", p[i]);
|
||||
heap_caps_dump(MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_DEFAULT, true));
|
||||
}
|
||||
#endif
|
||||
@@ -1,74 +0,0 @@
|
||||
/*
|
||||
Tests for D/IRAM support in heap capability allocator
|
||||
*/
|
||||
|
||||
#include <esp_types.h>
|
||||
#include <stdio.h>
|
||||
#include "unity.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "soc/soc_memory_layout.h"
|
||||
|
||||
#define ALLOC_SZ 1024
|
||||
|
||||
static void *malloc_block_diram(uint32_t caps)
|
||||
{
|
||||
void *attempts[256] = { 0 }; // Allocate up to 256 ALLOC_SZ blocks to exhaust all non-D/IRAM memory temporarily
|
||||
int count = 0;
|
||||
void *result;
|
||||
|
||||
while(count < sizeof(attempts)/sizeof(void *)) {
|
||||
result = heap_caps_malloc(ALLOC_SZ, caps);
|
||||
TEST_ASSERT_NOT_NULL_MESSAGE(result, "not enough free heap to perform test");
|
||||
|
||||
if (esp_ptr_in_diram_dram(result) || esp_ptr_in_diram_iram(result)) {
|
||||
break;
|
||||
}
|
||||
|
||||
attempts[count] = result;
|
||||
result = NULL;
|
||||
count++;
|
||||
}
|
||||
|
||||
for (int i = 0; i < count; i++) {
|
||||
free(attempts[i]);
|
||||
}
|
||||
|
||||
TEST_ASSERT_NOT_NULL_MESSAGE(result, "not enough D/IRAM memory is free");
|
||||
return result;
|
||||
}
|
||||
|
||||
TEST_CASE("Allocate D/IRAM as DRAM", "[heap]")
|
||||
{
|
||||
uint32_t *dram = malloc_block_diram(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
|
||||
|
||||
for (int i = 0; i < ALLOC_SZ / sizeof(uint32_t); i++) {
|
||||
uint32_t v = i + 0xAAAA;
|
||||
dram[i] = v;
|
||||
volatile uint32_t *iram = esp_ptr_diram_dram_to_iram(dram + i);
|
||||
TEST_ASSERT_EQUAL(v, dram[i]);
|
||||
TEST_ASSERT_EQUAL(v, *iram);
|
||||
*iram = UINT32_MAX;
|
||||
TEST_ASSERT_EQUAL(UINT32_MAX, *iram);
|
||||
TEST_ASSERT_EQUAL(UINT32_MAX, dram[i]);
|
||||
}
|
||||
|
||||
free(dram);
|
||||
}
|
||||
|
||||
TEST_CASE("Allocate D/IRAM as IRAM", "[heap]")
|
||||
{
|
||||
uint32_t *iram = malloc_block_diram(MALLOC_CAP_EXEC);
|
||||
|
||||
for (int i = 0; i < ALLOC_SZ / sizeof(uint32_t); i++) {
|
||||
uint32_t v = i + 0xEEE;
|
||||
iram[i] = v;
|
||||
volatile uint32_t *dram = esp_ptr_diram_iram_to_dram(iram + i);
|
||||
TEST_ASSERT_EQUAL_HEX32(v, iram[i]);
|
||||
TEST_ASSERT_EQUAL_HEX32(v, *dram);
|
||||
*dram = UINT32_MAX;
|
||||
TEST_ASSERT_EQUAL_HEX32(UINT32_MAX, *dram);
|
||||
TEST_ASSERT_EQUAL_HEX32(UINT32_MAX, iram[i]);
|
||||
}
|
||||
|
||||
free(iram);
|
||||
}
|
||||
@@ -1,164 +0,0 @@
|
||||
/*
|
||||
Generic test for heap tracing support
|
||||
|
||||
Only compiled in if CONFIG_HEAP_TRACING is set
|
||||
*/
|
||||
|
||||
#include <esp_types.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "sdkconfig.h"
|
||||
#include "unity.h"
|
||||
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
|
||||
#ifdef CONFIG_HEAP_TRACING
|
||||
// only compile in heap tracing tests if tracing is enabled
|
||||
|
||||
#include "esp_heap_trace.h"
|
||||
|
||||
TEST_CASE("heap trace leak check", "[heap]")
|
||||
{
|
||||
heap_trace_record_t recs[8];
|
||||
heap_trace_init_standalone(recs, 8);
|
||||
|
||||
printf("Leak check test\n"); // Print something before trace starts, or stdout allocations skew total counts
|
||||
fflush(stdout);
|
||||
|
||||
heap_trace_start(HEAP_TRACE_LEAKS);
|
||||
|
||||
void *a = malloc(64);
|
||||
memset(a, '3', 64);
|
||||
|
||||
void *b = malloc(96);
|
||||
memset(b, '4', 11);
|
||||
|
||||
printf("a.address %p vs %p b.address %p vs %p\n", a, recs[0].address, b, recs[1].address);
|
||||
|
||||
heap_trace_dump();
|
||||
TEST_ASSERT_EQUAL(2, heap_trace_get_count());
|
||||
|
||||
heap_trace_record_t trace_a, trace_b;
|
||||
heap_trace_get(0, &trace_a);
|
||||
heap_trace_get(1, &trace_b);
|
||||
|
||||
printf("trace_a.address %p trace_bb.address %p\n", trace_a.address, trace_b.address);
|
||||
|
||||
TEST_ASSERT_EQUAL_PTR(a, trace_a.address);
|
||||
TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
|
||||
|
||||
TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_a.address);
|
||||
TEST_ASSERT_EQUAL_PTR(recs[1].address, trace_b.address);
|
||||
|
||||
free(a);
|
||||
|
||||
TEST_ASSERT_EQUAL(1, heap_trace_get_count());
|
||||
|
||||
heap_trace_get(0, &trace_b);
|
||||
TEST_ASSERT_EQUAL_PTR(b, trace_b.address);
|
||||
|
||||
/* buffer deletes trace_a when freed,
|
||||
so trace_b is now at the head of the buffer */
|
||||
TEST_ASSERT_EQUAL_PTR(recs[0].address, trace_b.address);
|
||||
|
||||
heap_trace_stop();
|
||||
}
|
||||
|
||||
TEST_CASE("heap trace wrapped buffer check", "[heap]")
|
||||
{
|
||||
const size_t N = 8;
|
||||
heap_trace_record_t recs[N];
|
||||
heap_trace_init_standalone(recs, N);
|
||||
|
||||
heap_trace_start(HEAP_TRACE_LEAKS);
|
||||
|
||||
void *ptrs[N+1];
|
||||
for (int i = 0; i < N+1; i++) {
|
||||
ptrs[i] = malloc(i*3);
|
||||
}
|
||||
|
||||
// because other mallocs happen as part of this control flow,
|
||||
// we can't guarantee N entries of ptrs[] are in the heap check buffer.
|
||||
// but we should guarantee at least the last one is
|
||||
bool saw_last_ptr = false;
|
||||
for (int i = 0; i < N; i++) {
|
||||
heap_trace_record_t rec;
|
||||
heap_trace_get(i, &rec);
|
||||
if (rec.address == ptrs[N-1]) {
|
||||
saw_last_ptr = true;
|
||||
}
|
||||
}
|
||||
TEST_ASSERT(saw_last_ptr);
|
||||
|
||||
void *other = malloc(6);
|
||||
|
||||
heap_trace_dump();
|
||||
|
||||
for (int i = 0; i < N+1; i++) {
|
||||
free(ptrs[i]);
|
||||
}
|
||||
|
||||
heap_trace_dump();
|
||||
|
||||
bool saw_other = false;
|
||||
|
||||
for (int i = 0; i < heap_trace_get_count(); i++) {
|
||||
heap_trace_record_t rec;
|
||||
heap_trace_get(i, &rec);
|
||||
|
||||
// none of ptr[]s should be in the heap trace any more
|
||||
for (int j = 0; j < N+1; j++) {
|
||||
TEST_ASSERT_NOT_EQUAL(ptrs[j], rec.address);
|
||||
}
|
||||
if (rec.address == other) {
|
||||
saw_other = true;
|
||||
}
|
||||
}
|
||||
|
||||
// 'other' pointer should be somewhere in the leak dump
|
||||
TEST_ASSERT(saw_other);
|
||||
|
||||
heap_trace_stop();
|
||||
}
|
||||
|
||||
static void print_floats_task(void *ignore)
|
||||
{
|
||||
heap_trace_start(HEAP_TRACE_ALL);
|
||||
char buf[16] = { };
|
||||
volatile float f = 12.3456;
|
||||
sprintf(buf, "%.4f", f);
|
||||
TEST_ASSERT_EQUAL_STRING("12.3456", buf);
|
||||
heap_trace_stop();
|
||||
|
||||
vTaskDelete(NULL);
|
||||
}
|
||||
|
||||
TEST_CASE("can trace allocations made by newlib", "[heap]")
|
||||
{
|
||||
const size_t N = 8;
|
||||
heap_trace_record_t recs[N];
|
||||
heap_trace_init_standalone(recs, N);
|
||||
|
||||
/* Verifying that newlib code performs an allocation is very fiddly:
|
||||
|
||||
- Printing a float allocates data associated with the task, but only the
|
||||
first time a task prints a float of this length. So we do it in a one-shot task
|
||||
to avoid possibility it already happened.
|
||||
|
||||
- If newlib is updated this test may start failing if the printf() implementation
|
||||
changes. (This version passes for both nano & regular formatting in newlib 2.2.0)
|
||||
|
||||
- We also do the tracing in the task so we only capture things directly related to it.
|
||||
*/
|
||||
|
||||
xTaskCreate(print_floats_task, "print_float", 4096, NULL, 5, NULL);
|
||||
vTaskDelay(10);
|
||||
|
||||
/* has to be at least a few as newlib allocates via multiple different function calls */
|
||||
TEST_ASSERT(heap_trace_get_count() > 3);
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
@@ -1,60 +0,0 @@
|
||||
/*
|
||||
Tests for a leak tag
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include "unity.h"
|
||||
#include "esp_heap_caps_init.h"
|
||||
#include "esp_system.h"
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
static char* check_calloc(int size)
|
||||
{
|
||||
char *arr = calloc(size, sizeof(char));
|
||||
TEST_ASSERT_NOT_NULL(arr);
|
||||
return arr;
|
||||
}
|
||||
|
||||
TEST_CASE("Check for leaks (no leak)", "[heap]")
|
||||
{
|
||||
char *arr = check_calloc(1000);
|
||||
free(arr);
|
||||
}
|
||||
|
||||
TEST_CASE("Check for leaks (leak)", "[heap][ignore]")
|
||||
{
|
||||
check_calloc(1000);
|
||||
}
|
||||
|
||||
TEST_CASE("Not check for leaks", "[heap][leaks]")
|
||||
{
|
||||
check_calloc(1000);
|
||||
}
|
||||
|
||||
TEST_CASE("Set a leak level = 7016", "[heap][leaks=7016]")
|
||||
{
|
||||
check_calloc(7000);
|
||||
}
|
||||
|
||||
static void test_fn(void)
|
||||
{
|
||||
check_calloc(1000);
|
||||
}
|
||||
|
||||
TEST_CASE_MULTIPLE_STAGES("Not check for leaks in MULTIPLE_STAGES mode", "[heap][leaks]", test_fn, test_fn, test_fn);
|
||||
|
||||
TEST_CASE_MULTIPLE_STAGES("Check for leaks in MULTIPLE_STAGES mode (leak)", "[heap][ignore]", test_fn, test_fn, test_fn);
|
||||
|
||||
static void test_fn2(void)
|
||||
{
|
||||
check_calloc(1000);
|
||||
esp_restart();
|
||||
}
|
||||
|
||||
static void test_fn3(void)
|
||||
{
|
||||
check_calloc(1000);
|
||||
}
|
||||
|
||||
TEST_CASE_MULTIPLE_STAGES("Check for leaks in MULTIPLE_STAGES mode (manual reset)", "[heap][leaks][reset=SW_CPU_RESET, SW_CPU_RESET]", test_fn2, test_fn2, test_fn3);
|
||||
@@ -1,134 +0,0 @@
|
||||
/*
|
||||
Generic test for malloc/free
|
||||
*/
|
||||
|
||||
#include <esp_types.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "freertos/FreeRTOS.h"
|
||||
#include "freertos/task.h"
|
||||
#include "freertos/semphr.h"
|
||||
#include "freertos/queue.h"
|
||||
#include "unity.h"
|
||||
#include "esp_heap_caps.h"
|
||||
|
||||
#include "sdkconfig.h"
|
||||
|
||||
|
||||
static int **allocatedMem;
|
||||
static int noAllocated;
|
||||
|
||||
|
||||
static int tryAllocMem(void) {
|
||||
int i, j;
|
||||
const int allocateMaxK=1024*5; //try to allocate a max of 5MiB
|
||||
|
||||
allocatedMem=malloc(sizeof(int *)*allocateMaxK);
|
||||
if (!allocatedMem) return 0;
|
||||
|
||||
for (i=0; i<allocateMaxK; i++) {
|
||||
allocatedMem[i]=malloc(1024);
|
||||
if (allocatedMem[i]==NULL) break;
|
||||
for (j=0; j<1024/4; j++) allocatedMem[i][j]=(0xdeadbeef);
|
||||
}
|
||||
noAllocated=i;
|
||||
return i;
|
||||
}
|
||||
|
||||
|
||||
static void tryAllocMemFree(void) {
|
||||
int i, j;
|
||||
for (i=0; i<noAllocated; i++) {
|
||||
for (j=0; j<1024/4; j++) {
|
||||
TEST_ASSERT(allocatedMem[i][j]==(0xdeadbeef));
|
||||
}
|
||||
free(allocatedMem[i]);
|
||||
}
|
||||
free(allocatedMem);
|
||||
}
|
||||
|
||||
|
||||
TEST_CASE("Malloc/overwrite, then free all available DRAM", "[heap]")
|
||||
{
|
||||
int m1=0, m2=0;
|
||||
m1=tryAllocMem();
|
||||
tryAllocMemFree();
|
||||
m2=tryAllocMem();
|
||||
tryAllocMemFree();
|
||||
printf("Could allocate %dK on first try, %dK on 2nd try.\n", m1, m2);
|
||||
TEST_ASSERT(m1==m2);
|
||||
}
|
||||
|
||||
#if CONFIG_SPIRAM_USE_MALLOC
|
||||
|
||||
#if (CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL > 1024)
|
||||
TEST_CASE("Check if reserved DMA pool still can allocate even when malloc()'ed memory is exhausted", "[heap]")
|
||||
{
|
||||
char** dmaMem=malloc(sizeof(char*)*512);
|
||||
assert(dmaMem);
|
||||
int m=tryAllocMem();
|
||||
int i=0;
|
||||
for (i=0; i<512; i++) {
|
||||
dmaMem[i]=heap_caps_malloc(1024, MALLOC_CAP_DMA);
|
||||
if (dmaMem[i]==NULL) break;
|
||||
}
|
||||
for (int j=0; j<i; j++) free(dmaMem[j]);
|
||||
free(dmaMem);
|
||||
tryAllocMemFree();
|
||||
printf("Could allocate %dK of DMA memory after allocating all of %dK of normal memory.\n", i, m);
|
||||
TEST_ASSERT(i);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
/* As you see, we are desperately trying to outsmart the compiler, so that it
|
||||
* doesn't warn about oversized allocations in the next two unit tests.
|
||||
* To be removed when we switch to GCC 8.2 and add
|
||||
* -Wno-alloc-size-larger-than=PTRDIFF_MAX to CFLAGS for this file.
|
||||
*/
|
||||
void* (*g_test_malloc_ptr)(size_t) = &malloc;
|
||||
void* (*g_test_calloc_ptr)(size_t, size_t) = &calloc;
|
||||
|
||||
void* test_malloc_wrapper(size_t size)
|
||||
{
|
||||
return (*g_test_malloc_ptr)(size);
|
||||
}
|
||||
|
||||
void* test_calloc_wrapper(size_t count, size_t size)
|
||||
{
|
||||
return (*g_test_calloc_ptr)(count, size);
|
||||
}
|
||||
|
||||
TEST_CASE("alloc overflows should all fail", "[heap]")
|
||||
{
|
||||
/* allocates 8 bytes if size_t overflows */
|
||||
TEST_ASSERT_NULL(test_calloc_wrapper(SIZE_MAX / 2 + 4, 2));
|
||||
|
||||
/* will overflow if any poisoning is enabled
|
||||
(should fail for sensible OOM reasons, otherwise) */
|
||||
TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX - 1));
|
||||
TEST_ASSERT_NULL(test_calloc_wrapper(SIZE_MAX - 1, 1));
|
||||
|
||||
/* will overflow when the size is rounded up to word align it */
|
||||
TEST_ASSERT_NULL(heap_caps_malloc(SIZE_MAX-1, MALLOC_CAP_32BIT));
|
||||
|
||||
TEST_ASSERT_NULL(heap_caps_malloc(SIZE_MAX-1, MALLOC_CAP_EXEC));
|
||||
}
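
The first assertion in the test above probes integer overflow rather than memory pressure: (SIZE_MAX / 2 + 4) * 2 cannot be represented in size_t, so a calloc() that multiplied its arguments without a check would wrap around and hand back a tiny buffer. A minimal sketch of the guard such an implementation needs (illustrative only, not the actual heap component code):

#include <stddef.h>
#include <stdint.h>

/* Returns non-zero when count * size would wrap size_t and must be rejected. */
static int calloc_size_would_overflow(size_t count, size_t size)
{
    return (size != 0) && (count > SIZE_MAX / size);
}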
|
||||
|
||||
TEST_CASE("unreasonable allocs should all fail", "[heap]")
|
||||
{
|
||||
TEST_ASSERT_NULL(test_calloc_wrapper(16, 1024*1024));
|
||||
TEST_ASSERT_NULL(test_malloc_wrapper(16*1024*1024));
|
||||
TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX / 2));
|
||||
TEST_ASSERT_NULL(test_malloc_wrapper(SIZE_MAX - 256));
|
||||
TEST_ASSERT_NULL(test_malloc_wrapper(xPortGetFreeHeapSize() - 1));
|
||||
}
|
||||
|
||||
TEST_CASE("malloc(0) should return a NULL pointer", "[heap]")
|
||||
{
|
||||
void *p;
|
||||
p = malloc(0);
|
||||
TEST_ASSERT(p == NULL);
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
/*
|
||||
Tests for the capabilities-based memory allocator.
|
||||
*/
|
||||
|
||||
#include <esp_types.h>
|
||||
#include <stdio.h>
|
||||
#include "unity.h"
|
||||
#include "esp_attr.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "esp_spi_flash.h"
|
||||
#include <stdlib.h>
|
||||
#include <sys/param.h>
|
||||
|
||||
#ifndef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
|
||||
TEST_CASE("Capabilities allocator test", "[heap]")
|
||||
{
|
||||
char *m1, *m2[10];
|
||||
int x;
|
||||
size_t free8start, free32start, free8, free32;
|
||||
|
||||
/* It's important we printf() something before we take the empty heap sizes,
|
||||
as the first printf() in a task allocates heap resources... */
|
||||
printf("Testing capabilities allocator...\n");
|
||||
|
||||
free8start = heap_caps_get_free_size(MALLOC_CAP_8BIT);
|
||||
free32start = heap_caps_get_free_size(MALLOC_CAP_32BIT);
|
||||
printf("Free 8bit-capable memory (start): %dK, 32-bit capable memory %dK\n", free8start, free32start);
|
||||
TEST_ASSERT(free32start >= free8start);
|
||||
|
||||
printf("Allocating 10K of 8-bit capable RAM\n");
|
||||
m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT);
|
||||
printf("--> %p\n", m1);
|
||||
free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT);
|
||||
free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
|
||||
printf("Free 8bit-capable memory (both reduced): %dK, 32-bit capable memory %dK\n", free8, free32);
|
||||
//Both should have gone down by 10K; 8bit capable ram is also 32-bit capable
|
||||
TEST_ASSERT(free8<=(free8start-10*1024));
|
||||
TEST_ASSERT(free32<=(free32start-10*1024));
|
||||
//Assume we got DRAM back
|
||||
TEST_ASSERT((((int)m1)&0xFF000000)==0x3F000000);
|
||||
free(m1);
|
||||
|
||||
//The goal here is to allocate from IRAM. Since there is no external IRAM (yet)
|
||||
//the following gives size of IRAM-only (not D/IRAM) memory.
|
||||
size_t free_iram = heap_caps_get_free_size(MALLOC_CAP_INTERNAL) -
|
||||
heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
|
||||
size_t alloc32 = MIN(free_iram / 2, 10*1024) & (~3);
|
||||
if(free_iram) {
|
||||
printf("Freeing; allocating %u bytes of 32K-capable RAM\n", alloc32);
|
||||
m1 = heap_caps_malloc(alloc32, MALLOC_CAP_32BIT);
|
||||
printf("--> %p\n", m1);
|
||||
//Check that we got IRAM back
|
||||
TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
|
||||
free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT);
|
||||
free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
|
||||
printf("Free 8bit-capable memory (after 32-bit): %dK, 32-bit capable memory %dK\n", free8, free32);
|
||||
//Only 32-bit should have gone down by alloc32: 32-bit isn't necessarily 8bit capable
|
||||
TEST_ASSERT(free32<=(free32start-alloc32));
|
||||
TEST_ASSERT(free8==free8start);
|
||||
free(m1);
|
||||
} else {
|
||||
printf("This platform has no 32-bit only capable RAM, jumping to next test \n");
|
||||
}
|
||||
|
||||
printf("Allocating impossible caps\n");
|
||||
m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT|MALLOC_CAP_EXEC);
|
||||
printf("--> %p\n", m1);
|
||||
TEST_ASSERT(m1==NULL);
|
||||
|
||||
if(free_iram) {
|
||||
printf("Testing changeover iram -> dram");
|
||||
// priorities will exhaust IRAM first, then start allocating from DRAM
|
||||
for (x=0; x<10; x++) {
|
||||
m2[x]= heap_caps_malloc(alloc32, MALLOC_CAP_32BIT);
|
||||
printf("--> %p\n", m2[x]);
|
||||
}
|
||||
TEST_ASSERT((((int)m2[0])&0xFF000000)==0x40000000);
|
||||
TEST_ASSERT((((int)m2[9])&0xFF000000)==0x3F000000);
|
||||
|
||||
} else {
|
||||
printf("This platform has no IRAM-only so changeover will never occur, jumping to next test\n");
|
||||
}
|
||||
|
||||
printf("Test if allocating executable code still gives IRAM, even with dedicated IRAM region depleted\n");
|
||||
if(free_iram) {
|
||||
// (the allocation should come from D/IRAM)
|
||||
free_iram = heap_caps_get_free_size(MALLOC_CAP_EXEC);
|
||||
m1= heap_caps_malloc(MIN(free_iram / 2, 10*1024), MALLOC_CAP_EXEC);
|
||||
printf("--> %p\n", m1);
|
||||
TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
|
||||
for (x=0; x<10; x++) free(m2[x]);
|
||||
|
||||
} else {
|
||||
// (the allocation should come from D/IRAM)
|
||||
free_iram = heap_caps_get_free_size(MALLOC_CAP_EXEC);
|
||||
m1= heap_caps_malloc(MIN(free_iram / 2, 10*1024), MALLOC_CAP_EXEC);
|
||||
printf("--> %p\n", m1);
|
||||
TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000);
|
||||
}
|
||||
|
||||
free(m1);
|
||||
printf("Done.\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
|
||||
TEST_CASE("IRAM_8BIT capability test", "[heap]")
|
||||
{
|
||||
uint8_t *ptr;
|
||||
size_t free_size, free_size32, largest_free_size;
|
||||
|
||||
/* need to print something as first printf allocates some heap */
|
||||
printf("IRAM_8BIT capability test\n");
|
||||
|
||||
free_size = heap_caps_get_free_size(MALLOC_CAP_IRAM_8BIT);
|
||||
free_size32 = heap_caps_get_free_size(MALLOC_CAP_32BIT);
|
||||
|
||||
largest_free_size = heap_caps_get_largest_free_block(MALLOC_CAP_IRAM_8BIT);
|
||||
|
||||
ptr = heap_caps_malloc(largest_free_size, MALLOC_CAP_IRAM_8BIT);
|
||||
|
||||
TEST_ASSERT((((int)ptr)&0xFF000000)==0x40000000);
|
||||
|
||||
TEST_ASSERT(heap_caps_get_free_size(MALLOC_CAP_IRAM_8BIT) == (free_size - heap_caps_get_allocated_size(ptr)));
|
||||
TEST_ASSERT(heap_caps_get_free_size(MALLOC_CAP_32BIT) == (free_size32 - heap_caps_get_allocated_size(ptr)));
|
||||
|
||||
free(ptr);
|
||||
}
|
||||
#endif
|
||||
|
||||
TEST_CASE("heap_caps metadata test", "[heap]")
|
||||
{
|
||||
/* need to print something as first printf allocates some heap */
|
||||
printf("heap_caps metadata test\n");
|
||||
heap_caps_print_heap_info(MALLOC_CAP_8BIT);
|
||||
|
||||
multi_heap_info_t original;
|
||||
heap_caps_get_info(&original, MALLOC_CAP_8BIT);
|
||||
|
||||
void *b = heap_caps_malloc(original.largest_free_block, MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(b);
|
||||
|
||||
printf("After allocating %d bytes:\n", original.largest_free_block);
|
||||
heap_caps_print_heap_info(MALLOC_CAP_8BIT);
|
||||
|
||||
multi_heap_info_t after;
|
||||
heap_caps_get_info(&after, MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT(after.largest_free_block <= original.largest_free_block);
|
||||
TEST_ASSERT(after.total_free_bytes <= original.total_free_bytes);
|
||||
|
||||
free(b);
|
||||
heap_caps_get_info(&after, MALLOC_CAP_8BIT);
|
||||
|
||||
printf("\n\n After test, heap status:\n");
|
||||
heap_caps_print_heap_info(MALLOC_CAP_8BIT);
|
||||
|
||||
/* Allow some leeway here, because LWIP sometimes allocates up to 144 bytes in the background
|
||||
as part of timer management.
|
||||
*/
|
||||
TEST_ASSERT_INT32_WITHIN(200, after.total_free_bytes, original.total_free_bytes);
|
||||
TEST_ASSERT_INT32_WITHIN(200, after.largest_free_block, original.largest_free_block);
|
||||
TEST_ASSERT(after.minimum_free_bytes < original.total_free_bytes);
|
||||
}
|
||||
|
||||
/* Small function runs from IRAM to check that malloc/free/realloc
|
||||
all work OK when cache is disabled...
|
||||
*/
|
||||
static IRAM_ATTR __attribute__((noinline)) bool iram_malloc_test(void)
|
||||
{
|
||||
spi_flash_guard_get()->start(); // Disables flash cache
|
||||
|
||||
bool result = true;
|
||||
void *x = heap_caps_malloc(64, MALLOC_CAP_EXEC);
|
||||
result = result && (x != NULL);
|
||||
void *y = heap_caps_realloc(x, 32, MALLOC_CAP_EXEC);
|
||||
result = result && (y != NULL);
|
||||
heap_caps_free(y);
|
||||
|
||||
spi_flash_guard_get()->end(); // Re-enables flash cache
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
TEST_CASE("heap_caps_xxx functions work with flash cache disabled", "[heap]")
|
||||
{
|
||||
TEST_ASSERT( iram_malloc_test() );
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
|
||||
TEST_CASE("When enabled, allocation operation failure generates an abort", "[heap][reset=abort,SW_CPU_RESET]")
|
||||
{
|
||||
const size_t stupid_allocation_size = (128 * 1024 * 1024);
|
||||
void *ptr = heap_caps_malloc(stupid_allocation_size, MALLOC_CAP_DEFAULT);
|
||||
(void)ptr;
|
||||
TEST_FAIL_MESSAGE("should not be reached");
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool called_user_failed_hook = false;
|
||||
|
||||
void heap_caps_alloc_failed_hook(size_t requested_size, uint32_t caps, const char *function_name)
|
||||
{
|
||||
printf("%s was called but failed to allocate %d bytes with 0x%X capabilities. \n",function_name, requested_size, caps);
|
||||
called_user_failed_hook = true;
|
||||
}
|
||||
|
||||
TEST_CASE("user provided alloc failed hook must be called when allocation fails", "[heap]")
|
||||
{
|
||||
TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);
|
||||
|
||||
const size_t stupid_allocation_size = (128 * 1024 * 1024);
|
||||
void *ptr = heap_caps_malloc(stupid_allocation_size, MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(called_user_failed_hook != false);
|
||||
|
||||
called_user_failed_hook = false;
|
||||
ptr = heap_caps_realloc(ptr, stupid_allocation_size, MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(called_user_failed_hook != false);
|
||||
|
||||
called_user_failed_hook = false;
|
||||
ptr = heap_caps_aligned_alloc(0x200, stupid_allocation_size, MALLOC_CAP_DEFAULT);
|
||||
TEST_ASSERT(called_user_failed_hook != false);
|
||||
|
||||
(void)ptr;
|
||||
}
|
||||
|
||||
TEST_CASE("allocation with invalid capability should also trigger the alloc failed hook", "[heap]")
|
||||
{
|
||||
const size_t allocation_size = 64;
|
||||
const uint32_t invalid_cap = MALLOC_CAP_INVALID;
|
||||
|
||||
TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);
|
||||
|
||||
called_user_failed_hook = false;
|
||||
void *ptr = heap_caps_malloc(allocation_size, invalid_cap);
|
||||
TEST_ASSERT(called_user_failed_hook != false);
|
||||
|
||||
called_user_failed_hook = false;
|
||||
ptr = heap_caps_realloc(ptr, allocation_size, invalid_cap);
|
||||
TEST_ASSERT(called_user_failed_hook != false);
|
||||
|
||||
called_user_failed_hook = false;
|
||||
ptr = heap_caps_aligned_alloc(0x200, allocation_size, invalid_cap);
|
||||
TEST_ASSERT(called_user_failed_hook != false);
|
||||
|
||||
(void)ptr;
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
/*
|
||||
Generic test for realloc
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "unity.h"
|
||||
#include "sdkconfig.h"
|
||||
#include "esp_heap_caps.h"
|
||||
#include "soc/soc_memory_layout.h"
|
||||
|
||||
|
||||
#ifndef CONFIG_HEAP_POISONING_COMPREHENSIVE
|
||||
/* (can't realloc in place if comprehensive is enabled) */
|
||||
|
||||
TEST_CASE("realloc shrink buffer in place", "[heap]")
|
||||
{
|
||||
void *x = malloc(64);
|
||||
TEST_ASSERT(x);
|
||||
void *y = realloc(x, 48);
|
||||
TEST_ASSERT_EQUAL_PTR(x, y);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
|
||||
TEST_CASE("realloc shrink buffer with EXEC CAPS", "[heap]")
|
||||
{
|
||||
const size_t buffer_size = 64;
|
||||
|
||||
void *x = heap_caps_malloc(buffer_size, MALLOC_CAP_EXEC);
|
||||
TEST_ASSERT(x);
|
||||
void *y = heap_caps_realloc(x, buffer_size - 16, MALLOC_CAP_EXEC);
|
||||
TEST_ASSERT(y);
|
||||
|
||||
//y needs to fall in a compatible memory area of IRAM:
|
||||
TEST_ASSERT(esp_ptr_executable(y)|| esp_ptr_in_iram(y) || esp_ptr_in_diram_iram(y));
|
||||
|
||||
free(y);
|
||||
}
|
||||
|
||||
TEST_CASE("realloc move data to a new heap type", "[heap]")
|
||||
{
|
||||
const char *test = "I am some test content to put in the heap";
|
||||
char buf[64];
|
||||
memset(buf, 0xEE, 64);
|
||||
strlcpy(buf, test, 64);
|
||||
|
||||
char *a = malloc(64);
|
||||
memcpy(a, buf, 64);
|
||||
// move data from 'a' to IRAM
|
||||
char *b = heap_caps_realloc(a, 64, MALLOC_CAP_EXEC);
|
||||
TEST_ASSERT_NOT_NULL(b);
|
||||
TEST_ASSERT_NOT_EQUAL(a, b);
|
||||
TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
|
||||
TEST_ASSERT_EQUAL_HEX32_ARRAY(buf, b, 64 / sizeof(uint32_t));
|
||||
|
||||
// Move data back to DRAM
|
||||
char *c = heap_caps_realloc(b, 48, MALLOC_CAP_8BIT);
|
||||
TEST_ASSERT_NOT_NULL(c);
|
||||
TEST_ASSERT_NOT_EQUAL(b, c);
|
||||
TEST_ASSERT(heap_caps_check_integrity(MALLOC_CAP_INVALID, true));
|
||||
TEST_ASSERT_EQUAL_HEX8_ARRAY(buf, c, 48);
|
||||
|
||||
free(c);
|
||||
}
|
||||
#endif
|
||||
@@ -1,72 +0,0 @@
|
||||
/*
|
||||
Tests for registering new heap memory at runtime
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include "unity.h"
|
||||
#include "esp_heap_caps_init.h"
|
||||
#include "esp_system.h"
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
/* NOTE: This is not a well-formed unit test, it leaks memory */
|
||||
TEST_CASE("Allocate new heap at runtime", "[heap][ignore]")
|
||||
{
|
||||
const size_t BUF_SZ = 1000;
|
||||
const size_t HEAP_OVERHEAD_MAX = 200;
|
||||
void *buffer = malloc(BUF_SZ);
|
||||
TEST_ASSERT_NOT_NULL(buffer);
|
||||
uint32_t before_free = esp_get_free_heap_size();
|
||||
TEST_ESP_OK( heap_caps_add_region((intptr_t)buffer, (intptr_t)buffer + BUF_SZ) );
|
||||
uint32_t after_free = esp_get_free_heap_size();
|
||||
printf("Before %u after %u\n", before_free, after_free);
|
||||
/* allow for some 'heap overhead' from accounting structures */
|
||||
TEST_ASSERT(after_free >= before_free + BUF_SZ - HEAP_OVERHEAD_MAX);
|
||||
}
|
||||
|
||||
/* NOTE: This is not a well-formed unit test, it leaks memory and
|
||||
may fail if run twice in a row without a reset.
|
||||
*/
|
||||
TEST_CASE("Allocate new heap with new capability", "[heap][ignore]")
|
||||
{
|
||||
const size_t BUF_SZ = 100;
|
||||
#ifdef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
|
||||
const size_t ALLOC_SZ = 32;
|
||||
#else
|
||||
const size_t ALLOC_SZ = 64; // More than half of BUF_SZ
|
||||
#endif
|
||||
const uint32_t MALLOC_CAP_INVENTED = (1 << 30); /* this must be unused in esp_heap_caps.h */
|
||||
|
||||
/* no memory exists to provide this capability */
|
||||
TEST_ASSERT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
|
||||
|
||||
void *buffer = malloc(BUF_SZ);
|
||||
TEST_ASSERT_NOT_NULL(buffer);
|
||||
uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS] = { MALLOC_CAP_INVENTED };
|
||||
TEST_ESP_OK( heap_caps_add_region_with_caps(caps, (intptr_t)buffer, (intptr_t)buffer + BUF_SZ) );
|
||||
|
||||
/* ta-da, it's now possible! */
|
||||
TEST_ASSERT_NOT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
|
||||
}
|
||||
|
||||
/* NOTE: This is not a well-formed unit test.
|
||||
* If run twice without a reset, it will fail.
|
||||
*/
|
||||
|
||||
TEST_CASE("Add .bss memory to heap region runtime", "[heap][ignore]")
|
||||
{
|
||||
#define BUF_SZ 1000
|
||||
#define HEAP_OVERHEAD_MAX 200
|
||||
static uint8_t s_buffer[BUF_SZ];
|
||||
|
||||
printf("s_buffer start %08x end %08x\n", (intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ);
|
||||
uint32_t before_free = esp_get_free_heap_size();
|
||||
TEST_ESP_OK( heap_caps_add_region((intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ) );
|
||||
uint32_t after_free = esp_get_free_heap_size();
|
||||
printf("Before %u after %u\n", before_free, after_free);
|
||||
/* allow for some 'heap overhead' from accounting structures */
|
||||
TEST_ASSERT(after_free >= before_free + BUF_SZ - HEAP_OVERHEAD_MAX);
|
||||
|
||||
/* Adding the same region twice must fail */
|
||||
TEST_ASSERT( (heap_caps_add_region((intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ) != ESP_OK) );
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
TEST_PROGRAM=test_multi_heap
|
||||
all: $(TEST_PROGRAM)
|
||||
|
||||
ifneq ($(filter clean,$(MAKECMDGOALS)),)
|
||||
.NOTPARALLEL: # prevent make clean racing the other targets
|
||||
endif
|
||||
|
||||
SOURCE_FILES = $(abspath \
|
||||
../multi_heap.c \
|
||||
../heap_tlsf.c \
|
||||
../multi_heap_poisoning.c \
|
||||
test_multi_heap.cpp \
|
||||
main.cpp \
|
||||
)
|
||||
|
||||
INCLUDE_FLAGS = -I../include -I../../../tools/catch
|
||||
|
||||
GCOV ?= gcov
|
||||
|
||||
CPPFLAGS += $(INCLUDE_FLAGS) -D CONFIG_LOG_DEFAULT_LEVEL -g -fstack-protector-all -m32 -DCONFIG_HEAP_POISONING_COMPREHENSIVE
|
||||
CFLAGS += -Wall -Werror -fprofile-arcs -ftest-coverage
|
||||
CXXFLAGS += -std=c++11 -Wall -Werror -fprofile-arcs -ftest-coverage
|
||||
LDFLAGS += -lstdc++ -fprofile-arcs -ftest-coverage -m32
|
||||
|
||||
OBJ_FILES = $(filter %.o, $(SOURCE_FILES:.cpp=.o) $(SOURCE_FILES:.c=.o))
|
||||
|
||||
COVERAGE_FILES = $(OBJ_FILES:.o=.gc*)
|
||||
|
||||
$(TEST_PROGRAM): $(OBJ_FILES)
|
||||
g++ $(LDFLAGS) -o $(TEST_PROGRAM) $(OBJ_FILES)
|
||||
|
||||
$(OUTPUT_DIR):
|
||||
mkdir -p $(OUTPUT_DIR)
|
||||
|
||||
test: $(TEST_PROGRAM)
|
||||
./$(TEST_PROGRAM)
|
||||
|
||||
$(COVERAGE_FILES): $(TEST_PROGRAM) test
|
||||
|
||||
coverage.info: $(COVERAGE_FILES)
|
||||
find ../ -name "*.gcno" -exec $(GCOV) -r -pb {} +
|
||||
lcov --capture --directory $(abspath ../) --no-external --output-file coverage.info --gcov-tool $(GCOV)
|
||||
|
||||
coverage_report: coverage.info
|
||||
genhtml coverage.info --output-directory coverage_report
|
||||
@echo "Coverage report is in coverage_report/index.html"
|
||||
|
||||
clean:
|
||||
rm -f $(OBJ_FILES) $(TEST_PROGRAM)
|
||||
rm -f $(COVERAGE_FILES) *.gcov
|
||||
rm -rf coverage_report/
|
||||
rm -f coverage.info
|
||||
|
||||
.PHONY: clean all test
|
||||
@@ -1,2 +0,0 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
@@ -1,20 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Run the test suite with all configurations enabled
|
||||
#
|
||||
|
||||
FAIL=0
|
||||
|
||||
for FLAGS in "CONFIG_HEAP_POISONING_NONE" "CONFIG_HEAP_POISONING_LIGHT" "CONFIG_HEAP_POISONING_COMPREHENSIVE" ; do
|
||||
echo "==== Testing with config: ${FLAGS} ===="
|
||||
CPPFLAGS="-D${FLAGS}" make clean test || FAIL=1
|
||||
done
|
||||
|
||||
make clean
|
||||
|
||||
if [ $FAIL == 0 ]; then
|
||||
echo "All configurations passed"
|
||||
else
|
||||
echo "Some configurations failed, see log."
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,508 +0,0 @@
|
||||
#include "catch.hpp"
|
||||
#include "multi_heap.h"
|
||||
|
||||
#include "../multi_heap_config.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
|
||||
static void *__malloc__(size_t bytes)
|
||||
{
|
||||
return malloc(bytes);
|
||||
}
|
||||
|
||||
static void __free__(void *ptr)
|
||||
{
|
||||
free(ptr);
|
||||
}
|
||||
|
||||
/* Insurance against accidentally using libc heap functions in tests */
|
||||
#undef free
|
||||
#define free #error
|
||||
#undef malloc
|
||||
#define malloc #error
|
||||
#undef calloc
|
||||
#define calloc #error
|
||||
#undef realloc
|
||||
#define realloc #error
|
||||
|
||||
TEST_CASE("multi_heap simple allocations", "[multi_heap]")
|
||||
{
|
||||
uint8_t small_heap[4 * 1024];
|
||||
|
||||
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
|
||||
|
||||
size_t test_alloc_size = (multi_heap_free_size(heap) + 4) / 2;
|
||||
|
||||
printf("New heap:\n");
|
||||
multi_heap_dump(heap);
|
||||
printf("*********************\n");
|
||||
|
||||
uint8_t *buf = (uint8_t *)multi_heap_malloc(heap, test_alloc_size);
|
||||
|
||||
printf("small_heap %p buf %p\n", small_heap, buf);
|
||||
REQUIRE( buf != NULL );
|
||||
REQUIRE((intptr_t)buf >= (intptr_t)small_heap);
|
||||
REQUIRE( (intptr_t)buf < (intptr_t)(small_heap + sizeof(small_heap)));
|
||||
|
||||
REQUIRE( multi_heap_get_allocated_size(heap, buf) >= test_alloc_size );
|
||||
REQUIRE( multi_heap_get_allocated_size(heap, buf) < test_alloc_size + 16);
|
||||
|
||||
memset(buf, 0xEE, test_alloc_size);
|
||||
|
||||
REQUIRE( multi_heap_malloc(heap, test_alloc_size) == NULL );
|
||||
|
||||
multi_heap_free(heap, buf);
|
||||
|
||||
printf("Empty?\n");
|
||||
multi_heap_dump(heap);
|
||||
printf("*********************\n");
|
||||
|
||||
/* Now there should be space for another allocation */
|
||||
buf = (uint8_t *)multi_heap_malloc(heap, test_alloc_size);
|
||||
REQUIRE( buf != NULL );
|
||||
multi_heap_free(heap, buf);
|
||||
|
||||
REQUIRE( multi_heap_free_size(heap) > multi_heap_minimum_free_size(heap) );
|
||||
}
|
||||
|
||||
|
||||
TEST_CASE("multi_heap fragmentation", "[multi_heap]")
|
||||
{
|
||||
uint8_t small_heap[4 * 1024];
|
||||
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
|
||||
|
||||
const size_t alloc_size = 128;
|
||||
|
||||
void *p[4];
|
||||
for (int i = 0; i < 4; i++) {
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
p[i] = multi_heap_malloc(heap, alloc_size);
|
||||
printf("%d = %p ****->\n", i, p[i]);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( p[i] != NULL );
|
||||
}
|
||||
|
||||
printf("allocated %p %p %p %p\n", p[0], p[1], p[2], p[3]);
|
||||
|
||||
REQUIRE( multi_heap_malloc(heap, alloc_size * 5) == NULL ); /* no room to allocate 5*alloc_size now */
|
||||
|
||||
printf("4 allocations:\n");
|
||||
multi_heap_dump(heap);
|
||||
printf("****************\n");
|
||||
|
||||
multi_heap_free(heap, p[0]);
|
||||
multi_heap_free(heap, p[1]);
|
||||
multi_heap_free(heap, p[3]);
|
||||
|
||||
printf("1 allocations:\n");
|
||||
multi_heap_dump(heap);
|
||||
printf("****************\n");
|
||||
|
||||
void *big = multi_heap_malloc(heap, alloc_size * 3);
|
||||
//Blocks in TLSF are organized in different form, so this makes no sense
|
||||
multi_heap_free(heap, big);
|
||||
|
||||
multi_heap_free(heap, p[2]);
|
||||
|
||||
printf("0 allocations:\n");
|
||||
multi_heap_dump(heap);
|
||||
printf("****************\n");
|
||||
|
||||
big = multi_heap_malloc(heap, alloc_size * 2);
|
||||
//Blocks in TLSF are organized in different form, so this makes no sense
|
||||
multi_heap_free(heap, big);
|
||||
}
|
||||
|
||||
/* Test that malloc/free does not leave free space fragmented */
|
||||
TEST_CASE("multi_heap defrag", "[multi_heap]")
|
||||
{
|
||||
void *p[4];
|
||||
uint8_t small_heap[4 * 1024];
|
||||
multi_heap_info_t info, info2;
|
||||
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
|
||||
|
||||
printf("0 ---\n");
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
multi_heap_get_info(heap, &info);
|
||||
REQUIRE( 0 == info.allocated_blocks );
|
||||
REQUIRE( 1 == info.free_blocks );
|
||||
|
||||
printf("1 ---\n");
|
||||
p[0] = multi_heap_malloc(heap, 128);
|
||||
p[1] = multi_heap_malloc(heap, 32);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
|
||||
printf("2 ---\n");
|
||||
multi_heap_free(heap, p[0]);
|
||||
p[2] = multi_heap_malloc(heap, 64);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( p[2] == p[0] );
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
|
||||
printf("3 ---\n");
|
||||
multi_heap_free(heap, p[2]);
|
||||
p[3] = multi_heap_malloc(heap, 32);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( p[3] == p[0] );
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
|
||||
multi_heap_get_info(heap, &info2);
|
||||
REQUIRE( 2 == info2.allocated_blocks );
|
||||
REQUIRE( 2 == info2.free_blocks );
|
||||
|
||||
multi_heap_free(heap, p[0]);
|
||||
multi_heap_free(heap, p[1]);
|
||||
multi_heap_get_info(heap, &info2);
|
||||
REQUIRE( 0 == info2.allocated_blocks );
|
||||
REQUIRE( 1 == info2.free_blocks );
|
||||
REQUIRE( info.total_free_bytes == info2.total_free_bytes );
|
||||
}
|
||||
|
||||
/* Test that realloc does not leave free space fragmented
|
||||
Note: With fancy poisoning, realloc is implemented as malloc-copy-free and this test does not apply.
|
||||
*/
|
||||
#ifndef MULTI_HEAP_POISONING_SLOW
|
||||
TEST_CASE("multi_heap defrag realloc", "[multi_heap]")
|
||||
{
|
||||
void *p[4];
|
||||
uint8_t small_heap[4 * 1024];
|
||||
multi_heap_info_t info, info2;
|
||||
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
|
||||
|
||||
printf("0 ---\n");
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
multi_heap_get_info(heap, &info);
|
||||
REQUIRE( 0 == info.allocated_blocks );
|
||||
REQUIRE( 1 == info.free_blocks );
|
||||
|
||||
printf("1 ---\n");
|
||||
p[0] = multi_heap_malloc(heap, 128);
|
||||
p[1] = multi_heap_malloc(heap, 32);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
|
||||
printf("2 ---\n");
|
||||
p[2] = multi_heap_realloc(heap, p[0], 64);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( p[2] == p[0] );
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
|
||||
printf("3 ---\n");
|
||||
p[3] = multi_heap_realloc(heap, p[2], 32);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE( p[3] == p[0] );
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
|
||||
multi_heap_get_info(heap, &info2);
|
||||
REQUIRE( 2 == info2.allocated_blocks );
|
||||
REQUIRE( 2 == info2.free_blocks );
|
||||
|
||||
multi_heap_free(heap, p[0]);
|
||||
multi_heap_free(heap, p[1]);
|
||||
multi_heap_get_info(heap, &info2);
|
||||
REQUIRE( 0 == info2.allocated_blocks );
|
||||
REQUIRE( 1 == info2.free_blocks );
|
||||
REQUIRE( info.total_free_bytes == info2.total_free_bytes );
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void multi_heap_allocation_impl(int heap_size)
|
||||
{
|
||||
uint8_t *big_heap = (uint8_t *) __malloc__(2*heap_size);
|
||||
const int NUM_POINTERS = 64;
|
||||
|
||||
printf("Running multi-allocation test with heap_size %d...\n", heap_size);
|
||||
|
||||
REQUIRE( big_heap );
|
||||
multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size);
|
||||
|
||||
void *p[NUM_POINTERS] = { 0 };
|
||||
size_t s[NUM_POINTERS] = { 0 };
|
||||
|
||||
const size_t initial_free = multi_heap_free_size(heap);
|
||||
|
||||
const int ITERATIONS = 10000;
|
||||
|
||||
for (int i = 0; i < ITERATIONS; i++) {
|
||||
/* check all pointers allocated so far are valid inside big_heap */
|
||||
for (int j = 0; j < NUM_POINTERS; j++) {
|
||||
if (p[j] != NULL) {
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t n = rand() % NUM_POINTERS;
|
||||
|
||||
if (rand() % 4 == 0) {
|
||||
/* 1 in 4 iterations, try to realloc the buffer instead
|
||||
of using malloc/free
|
||||
*/
|
||||
size_t new_size = rand() % 1024;
|
||||
void *new_p = multi_heap_realloc(heap, p[n], new_size);
|
||||
printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size);
|
||||
multi_heap_check(heap, true);
|
||||
if (new_size == 0 || new_p != NULL) {
|
||||
p[n] = new_p;
|
||||
s[n] = new_size;
|
||||
if (new_size > 0) {
|
||||
REQUIRE( p[n] >= big_heap );
|
||||
REQUIRE( p[n] < big_heap + heap_size );
|
||||
memset(p[n], n, new_size);
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (p[n] != NULL) {
|
||||
if (s[n] > 0) {
|
||||
/* Verify pre-existing contents of p[n] */
|
||||
uint8_t compare[s[n]];
|
||||
memset(compare, n, s[n]);
|
||||
/*REQUIRE*/assert( memcmp(compare, p[n], s[n]) == 0 );
|
||||
}
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
multi_heap_free(heap, p[n]);
|
||||
printf("freed %p (%zu)\n", p[n], s[n]);
|
||||
if (!multi_heap_check(heap, true)) {
|
||||
printf("FAILED iteration %d after freeing %p\n", i, p[n]);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE(0);
|
||||
}
|
||||
}
|
||||
|
||||
s[n] = rand() % 1024;
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
p[n] = multi_heap_malloc(heap, s[n]);
|
||||
printf("malloc %p (%zu)\n", p[n], s[n]);
|
||||
if (p[n] != NULL) {
|
||||
REQUIRE( p[n] >= big_heap );
|
||||
REQUIRE( p[n] < big_heap + heap_size );
|
||||
}
|
||||
if (!multi_heap_check(heap, true)) {
|
||||
printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE(0);
|
||||
}
|
||||
if (p[n] != NULL) {
|
||||
memset(p[n], n, s[n]);
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < NUM_POINTERS; i++) {
|
||||
multi_heap_free(heap, p[i]);
|
||||
if (!multi_heap_check(heap, true)) {
|
||||
printf("FAILED during cleanup after freeing %p\n", p[i]);
|
||||
multi_heap_dump(heap);
|
||||
REQUIRE(0);
|
||||
}
|
||||
}
|
||||
|
||||
REQUIRE( initial_free == multi_heap_free_size(heap) );
|
||||
__free__(big_heap);
|
||||
}
|
||||
|
||||
TEST_CASE("multi_heap many random allocations", "[multi_heap]")
|
||||
{
|
||||
size_t poolsize[] = { 15, 255, 4095, 8191 };
|
||||
for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) {
|
||||
multi_heap_allocation_impl(poolsize[i] * 1024);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("multi_heap_get_info() function", "[multi_heap]")
|
||||
{
|
||||
uint8_t heapdata[4 * 1024];
|
||||
multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata));
|
||||
multi_heap_info_t before, after, freed;
|
||||
|
||||
multi_heap_get_info(heap, &before);
|
||||
printf("before: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
|
||||
before.total_free_bytes,
|
||||
before.total_allocated_bytes,
|
||||
before.largest_free_block,
|
||||
before.minimum_free_bytes,
|
||||
before.allocated_blocks,
|
||||
before.free_blocks,
|
||||
before.total_blocks);
|
||||
|
||||
REQUIRE( 0 == before.allocated_blocks );
|
||||
REQUIRE( 0 == before.total_allocated_bytes );
|
||||
REQUIRE( before.total_free_bytes == before.minimum_free_bytes );
|
||||
|
||||
void *x = multi_heap_malloc(heap, 32);
|
||||
multi_heap_get_info(heap, &after);
|
||||
printf("after: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
|
||||
after.total_free_bytes,
|
||||
after.total_allocated_bytes,
|
||||
after.largest_free_block,
|
||||
after.minimum_free_bytes,
|
||||
after.allocated_blocks,
|
||||
after.free_blocks,
|
||||
after.total_blocks);
|
||||
|
||||
REQUIRE( 1 == after.allocated_blocks );
|
||||
REQUIRE( 32 == after.total_allocated_bytes );
|
||||
REQUIRE( after.minimum_free_bytes < before.minimum_free_bytes);
|
||||
REQUIRE( after.minimum_free_bytes > 0 );
|
||||
|
||||
multi_heap_free(heap, x);
|
||||
multi_heap_get_info(heap, &freed);
|
||||
printf("freed: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n",
|
||||
freed.total_free_bytes,
|
||||
freed.total_allocated_bytes,
|
||||
freed.largest_free_block,
|
||||
freed.minimum_free_bytes,
|
||||
freed.allocated_blocks,
|
||||
freed.free_blocks,
|
||||
freed.total_blocks);
|
||||
|
||||
REQUIRE( 0 == freed.allocated_blocks );
|
||||
REQUIRE( 0 == freed.total_allocated_bytes );
|
||||
REQUIRE( before.total_free_bytes == freed.total_free_bytes );
|
||||
REQUIRE( after.minimum_free_bytes == freed.minimum_free_bytes );
|
||||
}
|
||||
|
||||
TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]")
|
||||
{
|
||||
uint8_t heapdata[4096];
|
||||
void *p[sizeof(heapdata) / sizeof(void *)] = {NULL};
|
||||
const size_t NUM_P = sizeof(p) / sizeof(void *);
|
||||
size_t allocated_size = 0;
|
||||
multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata));
|
||||
size_t before_free = multi_heap_free_size(heap);
|
||||
|
||||
size_t i;
|
||||
for (i = 0; i < NUM_P; i++) {
|
||||
//TLSF minimum block size is 4 bytes
|
||||
p[i] = multi_heap_malloc(heap, 1);
|
||||
if (p[i] == NULL) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
REQUIRE( i < NUM_P); // Should have run out of heap before we ran out of pointers
|
||||
printf("Allocated %zu minimum size chunks\n", i);
|
||||
|
||||
REQUIRE(multi_heap_free_size(heap) < before_free);
|
||||
multi_heap_check(heap, true);
|
||||
|
||||
/* Free in random order */
|
||||
bool has_allocations = true;
|
||||
while (has_allocations) {
|
||||
i = rand() % NUM_P;
|
||||
multi_heap_free(heap, p[i]);
|
||||
p[i] = NULL;
|
||||
multi_heap_check(heap, true);
|
||||
|
||||
has_allocations = false;
|
||||
for (i = 0; i < NUM_P && !has_allocations; i++) {
|
||||
has_allocations = (p[i] != NULL);
|
||||
}
|
||||
}
|
||||
|
||||
/* all freed! */
|
||||
REQUIRE( before_free == multi_heap_free_size(heap) );
|
||||
}
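
A rough sanity check for the loop above: with TLSF's 4-byte minimum block size, every 1-byte allocation still consumes at least 4 bytes of payload plus some per-block metadata, so the 4 KiB test heap can only hold on the order of a few hundred such blocks, far fewer than NUM_P. The sketch below makes that estimate explicit; the overhead constant is an assumption for illustration, not a value taken from the allocator.

#define TEST_HEAP_SIZE   4096   /* sizeof(heapdata) above */
#define MIN_PAYLOAD      4      /* TLSF minimum block size, per the comment above */
#define BLOCK_OVERHEAD   8      /* assumed per-block metadata, illustration only */

static size_t max_min_size_allocs(void)
{
    return TEST_HEAP_SIZE / (MIN_PAYLOAD + BLOCK_OVERHEAD);   /* roughly 340 blocks */
}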
|
||||
|
||||
TEST_CASE("multi_heap_realloc()", "[multi_heap]")
|
||||
{
|
||||
const uint32_t PATTERN = 0xABABDADA;
|
||||
uint8_t small_heap[4 * 1024];
|
||||
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
|
||||
|
||||
uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64);
|
||||
uint32_t *b = (uint32_t *)multi_heap_malloc(heap, 32);
|
||||
REQUIRE( a != NULL );
|
||||
REQUIRE( b != NULL );
|
||||
REQUIRE( b > a); /* 'b' takes the block after 'a' */
|
||||
|
||||
*a = PATTERN;
|
||||
|
||||
uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72);
|
||||
REQUIRE( multi_heap_check(heap, true));
|
||||
REQUIRE( c != NULL );
|
||||
REQUIRE( c > b ); /* 'a' moves, 'c' takes the block after 'b' */
|
||||
REQUIRE( *c == PATTERN );
|
||||
|
||||
#ifndef MULTI_HEAP_POISONING_SLOW
|
||||
// "Slow" poisoning implementation doesn't reallocate in place, so these
|
||||
// tests will fail...
|
||||
|
||||
uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
REQUIRE( c == d ); /* 'c' block should be shrunk in-place */
|
||||
REQUIRE( *d == PATTERN);
|
||||
|
||||
uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64);
|
||||
REQUIRE( multi_heap_check(heap, true));
|
||||
REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */
|
||||
|
||||
multi_heap_free(heap, d);
|
||||
uint32_t *f = (uint32_t *)multi_heap_realloc(heap, b, 64);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */
|
||||
|
||||
#ifdef MULTI_HEAP_POISONING
|
||||
#define TOO_MUCH 7420 + 1
|
||||
#else
|
||||
#define TOO_MUCH 7420 + 1
|
||||
#endif
|
||||
/* not enough contiguous space left in the heap */
|
||||
uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH);
|
||||
REQUIRE( g == NULL );
|
||||
|
||||
multi_heap_free(heap, f);
|
||||
/* try again */
|
||||
g = (uint32_t *)multi_heap_realloc(heap, e, 128);
|
||||
REQUIRE( multi_heap_check(heap, true) );
|
||||
REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */
|
||||
#endif
|
||||
}
|
||||
|
||||
// TLSF only accepts heaps aligned to a 4-byte boundary, so
|
||||
// only aligned allocation tests make sense.
|
||||
TEST_CASE("multi_heap aligned allocations", "[multi_heap]")
|
||||
{
|
||||
uint8_t test_heap[4 * 1024];
|
||||
multi_heap_handle_t heap = multi_heap_register(test_heap, sizeof(test_heap));
|
||||
uint32_t alignments = 0; // starts from alignment by 4-byte boundary
|
||||
size_t old_size = multi_heap_free_size(heap);
|
||||
size_t leakage = 1024;
|
||||
printf("[ALIGNED_ALLOC] heap_size before: %d \n", old_size);
|
||||
|
||||
printf("New heap:\n");
|
||||
multi_heap_dump(heap);
|
||||
printf("*********************\n");
|
||||
|
||||
for(;alignments <= 256; alignments++) {
|
||||
|
||||
//Use some stupid size value to test correct alignment even in strange
|
||||
//memory layout objects:
|
||||
uint8_t *buf = (uint8_t *)multi_heap_aligned_alloc(heap, (alignments + 137), alignments);
|
||||
if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
|
||||
REQUIRE( buf == NULL );
|
||||
} else {
|
||||
REQUIRE( buf != NULL );
|
||||
REQUIRE((intptr_t)buf >= (intptr_t)test_heap);
|
||||
REQUIRE((intptr_t)buf < (intptr_t)(test_heap + sizeof(test_heap)));
|
||||
|
||||
printf("[ALIGNED_ALLOC] alignment required: %u \n", aligments);
|
||||
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
|
||||
//Address of obtained block must be aligned with selected value
|
||||
REQUIRE(((intptr_t)buf & (alignments - 1)) == 0);
|
||||
|
||||
//Write some data, if it corrupts memory probably the heap
|
||||
//canary verification will fail:
|
||||
memset(buf, 0xA5, (alignments + 137));
|
||||
|
||||
multi_heap_free(heap, buf);
|
||||
}
|
||||
}
|
||||
|
||||
printf("[ALIGNED_ALLOC] heap_size after: %d \n", multi_heap_free_size(heap));
|
||||
REQUIRE((old_size - multi_heap_free_size(heap)) <= leakage);
|
||||
}

@@ -9,8 +9,6 @@ idf_component_register( SRCS
                        INCLUDE_DIRS .
                        REQUIRES nvs_flash
                        PRIV_REQUIRES console app_update tools services spi_flash platform_config vfs pthread wifi-manager platform_config newlib telnet display squeezelite tools)
target_link_libraries(${COMPONENT_LIB} "-Wl,--undefined=GDS_DrawPixelFast")
target_link_libraries(${COMPONENT_LIB} ${build_dir}/esp-idf/$<TARGET_PROPERTY:RECOVERY_PREFIX>/lib$<TARGET_PROPERTY:RECOVERY_PREFIX>.a )

set_source_files_properties(cmd_config.c
    PROPERTIES COMPILE_FLAGS

@@ -23,7 +23,6 @@
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/rtc_cntl_reg.h"
#include "esp32/rom/uart.h"
#include "sdkconfig.h"
#include "platform_console.h"
#include "messaging.h"

@@ -20,7 +20,7 @@
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/rtc_cntl_reg.h"
#include "esp32/rom/uart.h"
#include "esp_rom_uart.h"
#include "cmd_system.h"
#include "sdkconfig.h"
#include "esp_partition.h"
@@ -28,7 +28,6 @@
#include "platform_esp32.h"
#include "platform_config.h"
#include "esp_sleep.h"
#include "driver/uart.h" // for the uart driver access
#include "messaging.h"
#include "platform_console.h"
#include "tools.h"
@@ -791,7 +790,7 @@ static int light_sleep(int argc, char **argv)
        ESP_ERROR_CHECK( esp_sleep_enable_uart_wakeup(CONFIG_ESP_CONSOLE_UART_NUM) );
    }
    fflush(stdout);
    uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
    esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
    esp_light_sleep_start();
    esp_sleep_wakeup_cause_t cause = esp_sleep_get_wakeup_cause();
    const char *cause_str;

@@ -424,7 +424,7 @@ static int dmap_parse_internal(const dmap_settings *settings, const char *buf, s
        /* Make a best guess of the type */
        field_type = DMAP_UNKNOWN;
        field_name = code;

#ifdef DMAP_FULL
        if (field_len >= 8) {
            /* Look for a four char code followed by a length within the current field */
            if (isalpha(p[0] & 0xff) &&
@@ -448,6 +448,7 @@ static int dmap_parse_internal(const dmap_settings *settings, const char *buf, s

                field_type = is_string ? DMAP_STR : DMAP_UINT;
            }
#endif
        }

        switch (field_type) {

@@ -124,7 +124,7 @@ struct raop_ctx_s *raop_create(uint32_t host, char *name,
    char *txt[] = { "am=airesp32", "tp=UDP", "sm=false", "sv=false", "ek=1",
                    "et=0,1", "md=0,1,2", "cn=0,1", "ch=2",
                    "ss=16", "sr=44100", "vn=3", "txtvers=1",
                    NULL };
                    NULL };
#else
    const mdns_txt_item_t txt[] = {
        {"am", "airesp32"},
@@ -765,7 +765,7 @@ static void search_remote(void *args) {
        // can't use xNotifyGive as it seems LWIP is using it as well
        xSemaphoreGive(ctx->active_remote.destroy_mutex);
        vTaskSuspend(NULL);
    }
}
#endif


@@ -853,7 +853,7 @@ static char *rsa_apply(unsigned char *input, int inlen, int *outlen, int mode)
    }

    mbedtls_pk_free(&pkctx);


    return (char*) outbuf;
#endif
}

@@ -17,6 +17,8 @@ set(BELL_DISABLE_SINKS ON)
set(BELL_DISABLE_FMT ON)
set(BELL_DISABLE_REGEX ON)
set(BELL_ONLY_CJSON ON)
set(BELL_DISABLE_MQTT ON)
set(BELL_DISABLE_WEBSERVER ON)
set(CSPOT_TARGET_ESP32 ON)

# because CMake is so broken, the cache set below overrides a normal "set" for the first build

@@ -34,78 +34,6 @@

static class cspotPlayer *player;

/****************************************************************************************
 * Chunk manager class (task)
 */

class chunkManager : public bell::Task {
public:
    std::atomic<bool> isRunning = true;
    std::atomic<bool> isPaused = true;
    chunkManager(std::function<void()> trackHandler, std::function<void(const uint8_t*, size_t)> dataHandler);
    size_t writePCM(uint8_t* data, size_t bytes, std::string_view trackId, size_t sequence);
    void flush();
    void teardown();

private:
    std::unique_ptr<bell::CentralAudioBuffer> centralAudioBuffer;
    std::function<void()> trackHandler;
    std::function<void(const uint8_t*, size_t)> dataHandler;
    std::mutex runningMutex;

    void runTask() override;
};

chunkManager::chunkManager(std::function<void()> trackHandler, std::function<void(const uint8_t*, size_t)> dataHandler)
    : bell::Task("chunker", 4 * 1024, 0, 0) {
    this->centralAudioBuffer = std::make_unique<bell::CentralAudioBuffer>(32);
    this->trackHandler = trackHandler;
    this->dataHandler = dataHandler;
    startTask();
}

size_t chunkManager::writePCM(uint8_t* data, size_t bytes, std::string_view trackId, size_t sequence) {
    return centralAudioBuffer->writePCM(data, bytes, sequence);
}

void chunkManager::teardown() {
    isRunning = false;
    std::scoped_lock lock(runningMutex);
}

void chunkManager::flush() {
    centralAudioBuffer->clearBuffer();
}

void chunkManager::runTask() {
    std::scoped_lock lock(runningMutex);
    size_t lastHash = 0;

    while (isRunning) {

        if (isPaused) {
            BELL_SLEEP_MS(100);
            continue;
        }

        auto chunk = centralAudioBuffer->readChunk();

        if (!chunk || chunk->pcmSize == 0) {
            BELL_SLEEP_MS(50);
            continue;
        }

        // receiving first chunk of new track from Spotify server
        if (lastHash != chunk->trackHash) {
            CSPOT_LOG(info, "hash update %x => %x", lastHash, chunk->trackHash);
            lastHash = chunk->trackHash;
            trackHandler();
        }

        dataHandler(chunk->pcmData, chunk->pcmSize);
    }
}

/****************************************************************************************
 * Player's main class & task
 */

@@ -114,19 +42,21 @@ class cspotPlayer : public bell::Task {
private:
    std::string name;
    bell::WrappedSemaphore clientConnected;

    std::atomic<bool> isPaused, isConnected;

    int startOffset, volume = 0, bitrate = 160;
    httpd_handle_t serverHandle;
    int serverPort;
    cspot_cmd_cb_t cmdHandler;
    cspot_data_cb_t dataHandler;
    std::string lastTrackId;

    std::shared_ptr<cspot::LoginBlob> blob;
    std::unique_ptr<cspot::SpircHandler> spirc;
    std::unique_ptr<chunkManager> chunker;

    void eventHandler(std::unique_ptr<cspot::SpircHandler::Event> event);
    void trackHandler(void);
    size_t pcmWrite(uint8_t *pcm, size_t bytes, std::string_view trackId);

    void runTask();

@@ -155,6 +85,17 @@ cspotPlayer::cspotPlayer(const char* name, httpd_handle_t server, int port, cspo
    if (bitrate != 96 && bitrate != 160 && bitrate != 320) bitrate = 160;
}

size_t cspotPlayer::pcmWrite(uint8_t *pcm, size_t bytes, std::string_view trackId) {
    if (lastTrackId != trackId) {
        CSPOT_LOG(info, "new track started <%s> => <%s>", lastTrackId.c_str(), trackId.data());
        lastTrackId = trackId;
        trackHandler();
    }

    dataHandler(pcm, bytes);
    return bytes;
}
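pcmWrite above detects a track boundary by comparing the incoming trackId against the last one it saw, and only then notifies the sink. A minimal standalone sketch of that pattern (hypothetical names, no cspot or esp-idf dependencies):

```cpp
#include <cstdio>
#include <string>
#include <string_view>

// Hypothetical stand-in for the player state: remembers the last track id and
// reports when a different id shows up in the PCM stream.
struct TrackChangeDetector {
    std::string lastTrackId;

    // Returns true when the chunk belongs to a new track.
    bool onChunk(std::string_view trackId) {
        if (lastTrackId != trackId) {
            lastTrackId = trackId;
            return true;
        }
        return false;
    }
};

int main() {
    TrackChangeDetector det;
    const char* ids[] = {"track-A", "track-A", "track-B"};
    for (auto id : ids) {
        if (det.onChunk(id)) {
            std::printf("new track started <%s>\n", id);
        }
    }
    return 0;
}
```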

extern "C" {
static esp_err_t handleGET(httpd_req_t *request) {
    return player->handleGET(request);
@@ -233,8 +174,7 @@ esp_err_t cspotPlayer::handlePOST(httpd_req_t *request) {
void cspotPlayer::eventHandler(std::unique_ptr<cspot::SpircHandler::Event> event) {
    switch (event->eventType) {
    case cspot::SpircHandler::EventType::PLAYBACK_START: {
        chunker->flush();

        lastTrackId.clear();
        // we are not playing anymore
        trackStatus = TRACK_INIT;
        // memorize position for when track's beginning will be detected
@@ -247,13 +187,12 @@ void cspotPlayer::eventHandler(std::unique_ptr<cspot::SpircHandler::Event> event
        break;
    }
    case cspot::SpircHandler::EventType::PLAY_PAUSE: {
        bool pause = std::get<bool>(event->data);
        cmdHandler(pause ? CSPOT_PAUSE : CSPOT_PLAY);
        chunker->isPaused = pause;
        isPaused = std::get<bool>(event->data);
        cmdHandler(isPaused ? CSPOT_PAUSE : CSPOT_PLAY);
        break;
    }
    case cspot::SpircHandler::EventType::TRACK_INFO: {
        auto trackInfo = std::get<cspot::CDNTrackStream::TrackInfo>(event->data);
        auto trackInfo = std::get<cspot::TrackInfo>(event->data);
        cmdHandler(CSPOT_TRACK_INFO, trackInfo.duration, startOffset, trackInfo.artist.c_str(),
                   trackInfo.album.c_str(), trackInfo.name.c_str(), trackInfo.imageUrl.c_str());
        spirc->updatePositionMs(startOffset);
@@ -264,17 +203,14 @@ void cspotPlayer::eventHandler(std::unique_ptr<cspot::SpircHandler::Event> event
    case cspot::SpircHandler::EventType::PREV:
    case cspot::SpircHandler::EventType::FLUSH: {
        // FLUSH is sent when there is no next, just clean everything
        chunker->flush();
        cmdHandler(CSPOT_FLUSH);
        break;
    }
    case cspot::SpircHandler::EventType::DISC:
        chunker->flush();
        cmdHandler(CSPOT_DISC);
        chunker->teardown();
        isConnected = false;
        break;
    case cspot::SpircHandler::EventType::SEEK: {
        chunker->flush();
        cmdHandler(CSPOT_SEEK, std::get<int>(event->data));
        break;
    }
@@ -293,10 +229,9 @@ void cspotPlayer::eventHandler(std::unique_ptr<cspot::SpircHandler::Event> event

void cspotPlayer::trackHandler(void) {
    // this is just informative
    auto trackInfo = spirc->getTrackPlayer()->getCurrentTrackInfo();
    uint32_t remains;
    cmdHandler(CSPOT_QUERY_REMAINING, &remains);
    CSPOT_LOG(info, "next track <%s> will play in %d ms", trackInfo.name.c_str(), remains);
    CSPOT_LOG(info, "next track will play in %d ms", remains);

    // inform sink of track beginning
    trackStatus = TRACK_NOTIFY;
@@ -317,7 +252,8 @@ void cspotPlayer::command(cspot_event_t event) {
        break;
    // setPause comes back through cspot::event with PLAY/PAUSE
    case CSPOT_TOGGLE:
        spirc->setPause(!chunker->isPaused);
        isPaused = !isPaused;
        spirc->setPause(isPaused);
        break;
    case CSPOT_STOP:
    case CSPOT_PAUSE:
@@ -326,12 +262,11 @@ void cspotPlayer::command(cspot_event_t event) {
    case CSPOT_PLAY:
        spirc->setPause(false);
        break;
    // calling spirc->disconnect() might have been logical but it does not
    // generate any cspot::event, so we need to manually force exiting player
    // loop through chunker which will eventually do the disconnect
    /* Calling spirc->disconnect() might have been logical but it does not
     * generate any cspot::event */
    case CSPOT_DISC:
        cmdHandler(CSPOT_DISC);
        chunker->teardown();
        isConnected = false;
        break;
    // spirc->setRemoteVolume does not generate a cspot::event so call cmdHandler
    case CSPOT_VOLUME_UP:
@@ -391,20 +326,12 @@ void cspotPlayer::runTask() {
    // Auth successful
    if (token.size() > 0) {
        spirc = std::make_unique<cspot::SpircHandler>(ctx);
        isConnected = true;

        // Create a player, pass the track handler
        chunker = std::make_unique<chunkManager>(
            [this](void) {
                return trackHandler();
            },
            [this](const uint8_t* data, size_t bytes) {
                return dataHandler(data, bytes);
            });

        // set call back to calculate a hash on trackId
        spirc->getTrackPlayer()->setDataCallback(
            [this](uint8_t* data, size_t bytes, std::string_view trackId, size_t sequence) {
                return chunker->writePCM(data, bytes, trackId, sequence);
            [this](uint8_t* data, size_t bytes, std::string_view trackId) {
                return pcmWrite(data, bytes, trackId);
            });

        // set event (PLAY, VOLUME...) handler
@@ -420,7 +347,7 @@ void cspotPlayer::runTask() {
        cmdHandler(CSPOT_VOLUME, volume);

        // exit when player has stopped (received a DISC)
        while (chunker->isRunning) {
        while (isConnected) {
            ctx->session->handlePacket();

            // low-accuracy polling events

@@ -1,2 +1,3 @@

CompileFlags:
    CompilationDatabase: example/build # Search build/ directory for compile_commands.json

@@ -7,6 +7,8 @@ project(bell)
|
||||
option(BELL_DISABLE_CODECS "Disable the entire audio codec wrapper" OFF)
|
||||
option(BELL_CODEC_AAC "Support libhelix-aac codec" ON)
|
||||
option(BELL_CODEC_MP3 "Support libhelix-mp3 codec" ON)
|
||||
option(BELL_DISABLE_MQTT "Disable the built-in MQTT wrapper" OFF)
|
||||
option(BELL_DISABLE_WEBSERVER "Disable the built-in Web server" OFF)
|
||||
option(BELL_CODEC_VORBIS "Support tremor Vorbis codec" ON)
|
||||
option(BELL_CODEC_ALAC "Support Apple ALAC codec" ON)
|
||||
option(BELL_CODEC_OPUS "Support Opus codec" ON)
|
||||
@@ -63,13 +65,15 @@ endif()
|
||||
|
||||
message(STATUS " Use cJSON only: ${BELL_ONLY_CJSON}")
|
||||
message(STATUS " Disable Fmt: ${BELL_DISABLE_FMT}")
|
||||
message(STATUS " Disable Mqtt: ${BELL_DISABLE_MQTT}")
|
||||
message(STATUS " Disable Regex: ${BELL_DISABLE_REGEX}")
|
||||
message(STATUS " Disable Web server: ${BELL_DISABLE_WEBSERVER}")
|
||||
|
||||
# Include nanoPB library
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/external/nanopb/extra")
|
||||
find_package(Nanopb REQUIRED)
|
||||
message(${NANOPB_INCLUDE_DIRS})
|
||||
list(APPEND EXTRA_INCLUDES ${NANOPB_INCLUDE_DIRS})
|
||||
list(APPEND EXTERNAL_INCLUDES ${NANOPB_INCLUDE_DIRS})
|
||||
|
||||
# CMake options
|
||||
set(CMAKE_CXX_STANDARD 20)
|
||||
@@ -84,7 +88,7 @@ set(IO_DIR "${CMAKE_CURRENT_SOURCE_DIR}/main/io")
|
||||
set(PLATFORM_DIR "${CMAKE_CURRENT_SOURCE_DIR}/main/platform")
|
||||
set(UTILITIES_DIR "${CMAKE_CURRENT_SOURCE_DIR}/main/utilities")
|
||||
|
||||
add_definitions("-DUSE_DEFAULT_STDLIB=1")
|
||||
add_definitions("-DUSE_DEFAULT_STDLIB=1 -DTARGET_OS_IPHONE=0")
|
||||
|
||||
# Main library sources
|
||||
file(GLOB SOURCES
|
||||
@@ -93,8 +97,6 @@ file(GLOB SOURCES
|
||||
"main/io/*.cpp" "main/io/*.c"
|
||||
)
|
||||
|
||||
list(REMOVE_ITEM SOURCES "${IO_DIR}/BellTar.cpp" "${IO_DIR}/BellHTTPServer.cpp")
|
||||
|
||||
list(APPEND EXTRA_INCLUDES "main/audio-codec/include")
|
||||
list(APPEND EXTRA_INCLUDES "main/audio-dsp/include")
|
||||
list(APPEND EXTRA_INCLUDES "main/audio-sinks/include")
|
||||
@@ -111,7 +113,7 @@ endif()
|
||||
if(APPLE)
|
||||
file(GLOB APPLE_PLATFORM_SOURCES "main/platform/apple/*.cpp" "main/platform/apple/*.c")
|
||||
list(APPEND SOURCES ${APPLE_PLATFORM_SOURCES})
|
||||
list(APPEND EXTRA_INCLUDES "/usr/local/opt/mbedtls@3/include")
|
||||
list(APPEND EXTERNAL_INCLUDES "/usr/local/opt/mbedtls@3/include")
|
||||
endif()
|
||||
|
||||
if(UNIX AND NOT APPLE)
|
||||
@@ -122,7 +124,7 @@ endif()
|
||||
if(WIN32)
|
||||
file(GLOB WIN32_PLATFORM_SOURCES "main/platform/win32/*.cpp" "main/platform/win32/*.c")
|
||||
list(APPEND SOURCES ${WIN32_PLATFORM_SOURCES})
|
||||
list(APPEND EXTRA_INCLUDES "main/platform/win32")
|
||||
list(APPEND EXTERNAL_INCLUDES "main/platform/win32")
|
||||
endif()
|
||||
|
||||
# A hack to make Opus keep quiet
|
||||
@@ -139,7 +141,7 @@ if(ESP_PLATFORM)
|
||||
else()
|
||||
find_package(Threads REQUIRED)
|
||||
find_package(MbedTLS REQUIRED)
|
||||
list(APPEND EXTRA_INCLUDES ${MBEDTLS_INCLUDE_DIRS})
|
||||
list(APPEND EXTERNAL_INCLUDES ${MBEDTLS_INCLUDE_DIRS})
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
list(APPEND EXTRA_LIBS ${MBEDTLS_LIBRARIES} Threads::Threads)
|
||||
|
||||
@@ -149,6 +151,14 @@ else()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (NOT BELL_DISABLE_MQTT)
|
||||
file(GLOB MQTT_SOURCES "external/mqtt/*.c")
|
||||
list(APPEND SOURCES ${MQTT_SOURCES})
|
||||
list(APPEND EXTRA_INCLUDES "external/mqtt/include")
|
||||
else()
|
||||
list(REMOVE_ITEM SOURCES "${IO_DIR}/BellMQTTClient.cpp")
|
||||
endif()
|
||||
|
||||
if(NOT BELL_DISABLE_CODECS)
|
||||
file(GLOB EXTRA_SOURCES "main/audio-containers/*.cpp" "main/audio-codec/*.cpp" "main/audio-codec/*.c" "main/audio-dsp/*.cpp" "main/audio-dsp/*.c")
|
||||
|
||||
@@ -162,7 +172,7 @@ if(NOT BELL_DISABLE_CODECS)
|
||||
if(BELL_CODEC_AAC)
|
||||
file(GLOB LIBHELIX_AAC_SOURCES "external/libhelix-aac/*.c")
|
||||
list(APPEND LIBHELIX_SOURCES ${LIBHELIX_AAC_SOURCES})
|
||||
list(APPEND EXTRA_INCLUDES "external/libhelix-aac")
|
||||
list(APPEND EXTERNAL_INCLUDES "external/libhelix-aac")
|
||||
list(APPEND SOURCES "${AUDIO_CODEC_DIR}/AACDecoder.cpp")
|
||||
list(APPEND CODEC_FLAGS "-DBELL_CODEC_AAC")
|
||||
endif()
|
||||
@@ -171,7 +181,7 @@ if(NOT BELL_DISABLE_CODECS)
|
||||
if(BELL_CODEC_MP3)
|
||||
file(GLOB LIBHELIX_MP3_SOURCES "external/libhelix-mp3/*.c")
|
||||
list(APPEND LIBHELIX_SOURCES ${LIBHELIX_MP3_SOURCES})
|
||||
list(APPEND EXTRA_INCLUDES "external/libhelix-mp3")
|
||||
list(APPEND EXTERNAL_INCLUDES "external/libhelix-mp3")
|
||||
list(APPEND SOURCES "${AUDIO_CODEC_DIR}/MP3Decoder.cpp")
|
||||
list(APPEND CODEC_FLAGS "-DBELL_CODEC_MP3")
|
||||
endif()
|
||||
@@ -230,7 +240,7 @@ else()
|
||||
file(GLOB TREMOR_SOURCES "external/tremor/*.c")
|
||||
list(REMOVE_ITEM TREMOR_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/external/tremor/ivorbisfile_example.c")
|
||||
list(APPEND SOURCES ${TREMOR_SOURCES})
|
||||
list(APPEND EXTRA_INCLUDES "external/tremor")
|
||||
list(APPEND EXTERNAL_INCLUDES "external/tremor")
|
||||
endif()
|
||||
|
||||
if(NOT BELL_DISABLE_SINKS)
|
||||
@@ -247,7 +257,7 @@ if(NOT BELL_DISABLE_SINKS)
|
||||
# Find ALSA if required, else remove the sink
|
||||
if(BELL_SINK_ALSA)
|
||||
find_package(ALSA REQUIRED)
|
||||
list(APPEND EXTRA_INCLUDES ${ALSA_INCLUDE_DIRS})
|
||||
list(APPEND EXTERNAL_INCLUDES ${ALSA_INCLUDE_DIRS})
|
||||
list(APPEND EXTRA_LIBS ${ALSA_LIBRARIES})
|
||||
else()
|
||||
list(REMOVE_ITEM SINK_SOURCES "${AUDIO_SINKS_DIR}/unix/ALSAAudioSink.cpp")
|
||||
@@ -256,7 +266,7 @@ if(NOT BELL_DISABLE_SINKS)
|
||||
# Find PortAudio if required, else remove the sink
|
||||
if(BELL_SINK_PORTAUDIO)
|
||||
find_package(Portaudio REQUIRED)
|
||||
list(APPEND EXTRA_INCLUDES ${PORTAUDIO_INCLUDE_DIRS})
|
||||
list(APPEND EXTERNAL_INCLUDES ${PORTAUDIO_INCLUDE_DIRS})
|
||||
list(APPEND EXTRA_LIBS ${PORTAUDIO_LIBRARIES})
|
||||
else()
|
||||
list(REMOVE_ITEM SINK_SOURCES "${AUDIO_SINKS_DIR}/unix/PortAudioSink.cpp")
|
||||
@@ -266,6 +276,7 @@ if(NOT BELL_DISABLE_SINKS)
|
||||
endif()
|
||||
|
||||
if(NOT BELL_ONLY_CJSON)
|
||||
set(JSON_SystemInclude ON CACHE INTERNAL "")
|
||||
add_subdirectory(external/nlohmann_json)
|
||||
list(APPEND EXTRA_LIBS nlohmann_json::nlohmann_json)
|
||||
endif()
|
||||
@@ -274,22 +285,25 @@ if(BELL_EXTERNAL_CJSON)
|
||||
list(APPEND EXTRA_LIBS ${BELL_EXTERNAL_CJSON})
|
||||
else()
|
||||
list(APPEND SOURCES "external/cJSON/cJSON.c")
|
||||
list(APPEND EXTRA_INCLUDES "external/cJSON")
|
||||
list(APPEND EXTERNAL_INCLUDES "external/cJSON")
|
||||
endif()
|
||||
|
||||
if (NOT BELL_DISABLE_FMT)
|
||||
list(APPEND EXTRA_INCLUDES "external/fmt/include")
|
||||
list(APPEND EXTERNAL_INCLUDES "external/fmt/include")
|
||||
endif()
|
||||
|
||||
if(WIN32 OR UNIX)
|
||||
list(APPEND SOURCES "external/mdnssvc/mdns.c" "external/mdnssvc/mdnsd.c")
|
||||
list(APPEND EXTRA_INCLUDES "external/mdnssvc")
|
||||
list(APPEND EXTERNAL_INCLUDES "external/mdnssvc")
|
||||
endif()
|
||||
|
||||
# file(GLOB CIVET_SRC "external/civetweb/*.c" "external/civetweb/*.inl" "external/civetweb/*.cpp")
|
||||
|
||||
# list(APPEND SOURCES ${CIVET_SRC})
|
||||
# list(APPEND EXTRA_INCLUDES "external/civetweb/include")
|
||||
if(NOT BELL_DISABLE_WEBSERVER)
|
||||
file(GLOB CIVET_SRC "external/civetweb/*.c" "external/civetweb/*.inl" "external/civetweb/*.cpp")
|
||||
list(APPEND SOURCES ${CIVET_SRC})
|
||||
list(APPEND EXTRA_INCLUDES "external/civetweb/include")
|
||||
else()
|
||||
list(REMOVE_ITEM SOURCES "${IO_DIR}/BellHTTPServer.cpp")
|
||||
endif()
|
||||
|
||||
add_library(bell STATIC ${SOURCES})
|
||||
|
||||
@@ -305,6 +319,7 @@ endif()
|
||||
# PUBLIC to propagate esp-idf includes to bell dependents
|
||||
target_link_libraries(bell PUBLIC ${EXTRA_LIBS})
|
||||
target_include_directories(bell PUBLIC ${EXTRA_INCLUDES} ${CMAKE_CURRENT_BINARY_DIR})
|
||||
target_include_directories(bell SYSTEM PUBLIC ${EXTERNAL_INCLUDES})
|
||||
target_compile_definitions(bell PUBLIC PB_ENABLE_MALLOC FMT_HEADER_ONLY)
|
||||
|
||||
if(BELL_DISABLE_CODECS)
|
||||
|
||||
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.18)
|
||||
set(CMAKE_CXX_STANDARD 20)
|
||||
set(CMAKE_BUILD_TYPE Debug)
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||
|
||||
set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES})
|
||||
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/../ ${CMAKE_CURRENT_BINARY_DIR}/bell)
|
||||
|
||||
file(GLOB SOURCES "*.cpp")
|
||||
|
||||
@@ -1,26 +1,14 @@
|
||||
#include <memory.h>
|
||||
#include <atomic>
|
||||
#include <cmath>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "AudioCodecs.h"
|
||||
#include "AudioContainers.h"
|
||||
#include "BellHTTPServer.h"
|
||||
#include "BellTar.h"
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
|
||||
#include "BellTask.h"
|
||||
#include "CentralAudioBuffer.h"
|
||||
#include "Compressor.h"
|
||||
#include "DecoderGlobals.h"
|
||||
#include "EncodedAudioStream.h"
|
||||
#include "HTTPClient.h"
|
||||
#include "PortAudioSink.h"
|
||||
#define DEBUG_LEVEL 4
|
||||
#include "X509Bundle.h"
|
||||
#include "mbedtls/debug.h"
|
||||
#include "StreamInfo.h"
|
||||
|
||||
#define DEBUG_LEVEL 4
|
||||
#include <BellDSP.h>
|
||||
#include <BellLogger.h>
|
||||
|
||||
@@ -58,13 +46,8 @@ class AudioPlayer : bell::Task {
|
||||
int main() {
|
||||
bell::setDefaultLogger();
|
||||
|
||||
std::fstream file("system.tar", std::ios::in | std::ios::binary);
|
||||
if (!file.is_open()) {
|
||||
std::cout << "file not open" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
BellTar::reader reader(file);
|
||||
reader.extract_all_files("./dupa2");
|
||||
BELL_LOG(info, "cock", "Published?");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -118,12 +118,15 @@
|
||||
#
|
||||
#=============================================================================
|
||||
|
||||
|
||||
function(NANOPB_GENERATE_CPP SRCS HDRS)
|
||||
cmake_parse_arguments(NANOPB_GENERATE_CPP "" "RELPATH" "" ${ARGN})
|
||||
if(NOT NANOPB_GENERATE_CPP_UNPARSED_ARGUMENTS)
|
||||
return()
|
||||
endif()
|
||||
|
||||
if(MSVC)
|
||||
set(CUSTOM_COMMAND_PREFIX call)
|
||||
endif()
|
||||
|
||||
if(NANOPB_GENERATE_CPP_APPEND_PATH)
|
||||
# Create an include path for each file specified
|
||||
@@ -184,7 +187,7 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
|
||||
set(GENERATOR_CORE_PYTHON_SRC ${GENERATOR_CORE_PYTHON_SRC} ${output})
|
||||
add_custom_command(
|
||||
OUTPUT ${output}
|
||||
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
|
||||
COMMAND ${CUSTOM_COMMAND_PREFIX} ${PROTOBUF_PROTOC_EXECUTABLE}
|
||||
ARGS -I${GENERATOR_PATH}/proto
|
||||
--python_out=${GENERATOR_CORE_DIR} ${ABS_FIL}
|
||||
DEPENDS ${ABS_FIL}
|
||||
@@ -276,7 +279,7 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
|
||||
add_custom_command(
|
||||
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.pb.c"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/${FIL_PATH_REL}/${FIL_WE}.pb.h"
|
||||
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
|
||||
COMMAND ${CUSTOM_COMMAND_PREFIX} ${PROTOBUF_PROTOC_EXECUTABLE}
|
||||
ARGS -I${GENERATOR_PATH} -I${GENERATOR_CORE_DIR}
|
||||
-I${CMAKE_CURRENT_BINARY_DIR} ${_nanopb_include_path}
|
||||
--plugin=protoc-gen-nanopb=${NANOPB_GENERATOR_PLUGIN}
|
||||
@@ -292,6 +295,10 @@ function(NANOPB_GENERATE_CPP SRCS HDRS)
|
||||
set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
|
||||
set(${SRCS} ${${SRCS}} ${NANOPB_SRCS} PARENT_SCOPE)
|
||||
set(${HDRS} ${${HDRS}} ${NANOPB_HDRS} PARENT_SCOPE)
|
||||
|
||||
if(MSVC)
|
||||
unset(CUSTOM_COMMAND_PREFIX)
|
||||
endif()
|
||||
|
||||
endfunction()
|
||||
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
#include "AACDecoder.h"
|
||||
#include <iostream>
|
||||
|
||||
#include <stdlib.h> // for free, malloc
|
||||
|
||||
#include "CodecType.h" // for bell
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
} // namespace bell
|
||||
|
||||
using namespace bell;
|
||||
|
||||
|
||||
@@ -1,27 +1,37 @@
|
||||
#include "AudioCodecs.h"
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
|
||||
#include <map> // for map, operator!=, map<>::iterator, map<>:...
|
||||
#include <type_traits> // for remove_extent_t
|
||||
|
||||
#include "AudioContainer.h" // for AudioContainer
|
||||
|
||||
namespace bell {
|
||||
class BaseCodec;
|
||||
} // namespace bell
|
||||
|
||||
using namespace bell;
|
||||
|
||||
#ifdef BELL_CODEC_AAC
|
||||
#include "AACDecoder.h"
|
||||
#include "AACDecoder.h" // for AACDecoder
|
||||
|
||||
static std::shared_ptr<AACDecoder> codecAac;
|
||||
#endif
|
||||
|
||||
#ifdef BELL_CODEC_MP3
|
||||
#include "MP3Decoder.h"
|
||||
#include "MP3Decoder.h" // for MP3Decoder
|
||||
|
||||
static std::shared_ptr<MP3Decoder> codecMp3;
|
||||
#endif
|
||||
|
||||
#ifdef BELL_CODEC_VORBIS
|
||||
#include "VorbisDecoder.h"
|
||||
#include "VorbisDecoder.h" // for VorbisDecoder
|
||||
|
||||
static std::shared_ptr<VorbisDecoder> codecVorbis;
|
||||
#endif
|
||||
|
||||
#ifdef BELL_CODEC_OPUS
|
||||
#include "OPUSDecoder.h"
|
||||
#include "OPUSDecoder.h" // for OPUSDecoder
|
||||
|
||||
static std::shared_ptr<OPUSDecoder> codecOpus;
|
||||
#endif
|
||||
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
#include "BaseCodec.h"
|
||||
#include <iostream>
|
||||
|
||||
#include "AudioContainer.h" // for AudioContainer
|
||||
#include "CodecType.h" // for bell
|
||||
|
||||
using namespace bell;
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
|
||||
bell::DecodersInstance* bell::decodersInstance;
|
||||
|
||||
void bell::createDecoders()
|
||||
{
|
||||
bell::decodersInstance = new bell::DecodersInstance();
|
||||
void bell::createDecoders() {
|
||||
bell::decodersInstance = new bell::DecodersInstance();
|
||||
}
|
||||
@@ -1,5 +1,13 @@
|
||||
#include "MP3Decoder.h"
|
||||
|
||||
#include <stdlib.h> // for free, malloc
|
||||
|
||||
#include "CodecType.h" // for bell
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
} // namespace bell
|
||||
|
||||
using namespace bell;
|
||||
|
||||
MP3Decoder::MP3Decoder() {
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
#include "OPUSDecoder.h"
|
||||
#include "opus.h"
|
||||
|
||||
#include <stdlib.h> // for free, malloc
|
||||
|
||||
#include "CodecType.h" // for bell
|
||||
#include "opus.h" // for opus_decoder_destroy, opus_decode, opus_decod...
|
||||
|
||||
using namespace bell;
|
||||
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
#include "VorbisDecoder.h"
|
||||
#include "AudioCodecs.h"
|
||||
|
||||
#include <stdlib.h> // for free, malloc
|
||||
|
||||
#include "CodecType.h" // for bell
|
||||
#include "config_types.h" // for ogg_int16_t
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
} // namespace bell
|
||||
|
||||
using namespace bell;
|
||||
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include "BaseCodec.h"
|
||||
#include "aacdec.h"
|
||||
#include <stdint.h> // for uint8_t, uint32_t, int16_t
|
||||
|
||||
#include "BaseCodec.h" // for BaseCodec
|
||||
#include "aacdec.h" // for AACFrameInfo, HAACDecoder
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
|
||||
class AACDecoder : public BaseCodec {
|
||||
private:
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include "BaseCodec.h"
|
||||
#include "AudioContainer.h"
|
||||
#include <memory> // for shared_ptr
|
||||
|
||||
#include "AudioContainer.h" // for AudioContainer
|
||||
#include "BaseCodec.h" // for BaseCodec
|
||||
#include "CodecType.h" // for AudioCodec
|
||||
|
||||
namespace bell {
|
||||
|
||||
class AudioCodecs {
|
||||
public:
|
||||
static std::shared_ptr<BaseCodec> getCodec(AudioCodec type);
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
#pragma once
|
||||
|
||||
#include "AudioContainer.h"
|
||||
#include <stdint.h> // for uint32_t, uint8_t
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
|
||||
class BaseCodec {
|
||||
private:
|
||||
|
||||
@@ -5,48 +5,40 @@
|
||||
#define AAC_READBUF_SIZE (4 * AAC_MAINBUF_SIZE * AAC_MAX_NCHANS)
|
||||
#define MP3_READBUF_SIZE (2 * 1024);
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <memory>
|
||||
#include "aacdec.h"
|
||||
#include "mp3dec.h"
|
||||
#include <stdio.h> // for NULL
|
||||
|
||||
namespace bell
|
||||
{
|
||||
class DecodersInstance
|
||||
{
|
||||
public:
|
||||
DecodersInstance(){};
|
||||
~DecodersInstance()
|
||||
{
|
||||
MP3FreeDecoder(mp3Decoder);
|
||||
AACFreeDecoder(aacDecoder);
|
||||
};
|
||||
#include "aacdec.h" // for AACFreeDecoder, AACInitDecoder, HAACDecoder
|
||||
#include "mp3dec.h" // for MP3FreeDecoder, MP3InitDecoder, HMP3Decoder
|
||||
|
||||
HAACDecoder aacDecoder = NULL;
|
||||
HMP3Decoder mp3Decoder = NULL;
|
||||
namespace bell {
|
||||
class DecodersInstance {
|
||||
public:
|
||||
DecodersInstance(){};
|
||||
~DecodersInstance() {
|
||||
MP3FreeDecoder(mp3Decoder);
|
||||
AACFreeDecoder(aacDecoder);
|
||||
};
|
||||
|
||||
void ensureAAC()
|
||||
{
|
||||
if (aacDecoder == NULL)
|
||||
{
|
||||
aacDecoder = AACInitDecoder();
|
||||
}
|
||||
}
|
||||
HAACDecoder aacDecoder = NULL;
|
||||
HMP3Decoder mp3Decoder = NULL;
|
||||
|
||||
void ensureMP3()
|
||||
{
|
||||
if (mp3Decoder == NULL)
|
||||
{
|
||||
mp3Decoder = MP3InitDecoder();
|
||||
}
|
||||
}
|
||||
};
|
||||
void ensureAAC() {
|
||||
if (aacDecoder == NULL) {
|
||||
aacDecoder = AACInitDecoder();
|
||||
}
|
||||
}
|
||||
|
||||
extern bell::DecodersInstance* decodersInstance;
|
||||
void ensureMP3() {
|
||||
if (mp3Decoder == NULL) {
|
||||
mp3Decoder = MP3InitDecoder();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void createDecoders();
|
||||
}
|
||||
extern bell::DecodersInstance* decodersInstance;
|
||||
|
||||
void createDecoders();
|
||||
} // namespace bell
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
#pragma once
|
||||
|
||||
#include "BaseCodec.h"
|
||||
#include "mp3dec.h"
|
||||
#include <stdint.h> // for uint8_t, uint32_t, int16_t
|
||||
|
||||
#include "BaseCodec.h" // for BaseCodec
|
||||
#include "mp3dec.h" // for HMP3Decoder, MP3FrameInfo
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
|
||||
class MP3Decoder : public BaseCodec {
|
||||
private:
|
||||
HMP3Decoder mp3;
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#include "BaseCodec.h"
|
||||
#include <stdint.h> // for uint8_t, uint32_t, int16_t
|
||||
|
||||
#include "BaseCodec.h" // for BaseCodec
|
||||
|
||||
struct OpusDecoder;
|
||||
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include "BaseCodec.h"
|
||||
#include "ivorbiscodec.h"
|
||||
#include <stdint.h> // for uint8_t, uint32_t, int16_t
|
||||
|
||||
#include "BaseCodec.h" // for BaseCodec
|
||||
#include "ivorbiscodec.h" // for vorbis_comment, vorbis_dsp_state, vorbis_info
|
||||
#include "ogg.h" // for ogg_packet
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
|
||||
class VorbisDecoder : public BaseCodec {
|
||||
private:
|
||||
vorbis_info* vi = nullptr;
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
#include "AACContainer.h"
|
||||
#include "iostream"
|
||||
|
||||
#include <cstring> // for memmove
|
||||
|
||||
#include "StreamInfo.h" // for BitWidth, BitWidth::BW_16, SampleRate, Sampl...
|
||||
#include "aacdec.h" // for AACFindSyncWord
|
||||
|
||||
using namespace bell;
|
||||
|
||||
#define SYNC_WORLD_LEN 4
|
||||
|
||||
@@ -1,5 +1,16 @@
|
||||
#include "AudioContainers.h"
|
||||
|
||||
#include <string.h> // for memcmp
|
||||
#include <cstddef> // for byte
|
||||
|
||||
#include "AACContainer.h" // for AACContainer
|
||||
#include "CodecType.h" // for bell
|
||||
#include "MP3Container.h" // for MP3Container
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
} // namespace bell
|
||||
|
||||
using namespace bell;
|
||||
|
||||
std::unique_ptr<bell::AudioContainer> AudioContainers::guessAudioContainer(
|
||||
@@ -7,8 +18,7 @@ std::unique_ptr<bell::AudioContainer> AudioContainers::guessAudioContainer(
|
||||
std::byte tmp[14];
|
||||
istr.read((char*)tmp, sizeof(tmp));
|
||||
|
||||
if (memcmp(tmp, "\xFF\xF1", 2) == 0 ||
|
||||
memcmp(tmp, "\xFF\xF9", 2) == 0) {
|
||||
if (memcmp(tmp, "\xFF\xF1", 2) == 0 || memcmp(tmp, "\xFF\xF9", 2) == 0) {
|
||||
// AAC found
|
||||
std::cout << "AAC" << std::endl;
|
||||
return std::make_unique<bell::AACContainer>(istr);
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
#include "MP3Container.h"
|
||||
|
||||
#include <cstring> // for memmove
|
||||
|
||||
#include "StreamInfo.h" // for BitWidth, BitWidth::BW_16, SampleRate, Sampl...
|
||||
#include "mp3dec.h" // for MP3FindSyncWord
|
||||
|
||||
using namespace bell;
|
||||
|
||||
MP3Container::MP3Container(std::istream& istr) : bell::AudioContainer(istr) {}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstring>
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
#include "AudioContainer.h"
|
||||
#include "aacdec.h"
|
||||
#include <stdint.h> // for uint32_t
|
||||
#include <cstddef> // for byte, size_t
|
||||
#include <istream> // for istream
|
||||
#include <vector> // for vector
|
||||
|
||||
#include "AudioContainer.h" // for AudioContainer
|
||||
#include "CodecType.h" // for AudioCodec, AudioCodec::AAC
|
||||
|
||||
namespace bell {
|
||||
class AACContainer : public AudioContainer {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstddef>
|
||||
#include <istream>
|
||||
#include <cstring>
|
||||
#include <istream>
|
||||
#include "CodecType.h"
|
||||
#include "StreamInfo.h"
|
||||
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include "AACContainer.h"
|
||||
#include "AudioContainer.h"
|
||||
#include "MP3Container.h"
|
||||
#include <iostream> // for istream
|
||||
#include <memory> // for unique_ptr
|
||||
|
||||
namespace bell {
|
||||
class AudioContainer;
|
||||
} // namespace bell
|
||||
|
||||
namespace bell::AudioContainers {
|
||||
std::unique_ptr<bell::AudioContainer> guessAudioContainer(std::istream& istr);
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstring>
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
#include "AudioContainer.h"
|
||||
#include "mp3dec.h"
|
||||
#include <stdint.h> // for uint32_t
|
||||
#include <cstddef> // for byte, size_t
|
||||
#include <istream> // for istream
|
||||
#include <vector> // for vector
|
||||
|
||||
#include "AudioContainer.h" // for AudioContainer
|
||||
#include "CodecType.h" // for AudioCodec, AudioCodec::MP3
|
||||
|
||||
namespace bell {
|
||||
class MP3Container : public AudioContainer {
|
||||
|
||||
@@ -1,39 +1,44 @@
#include "AudioMixer.h"

#include <mutex>  // for scoped_lock

using namespace bell;

AudioMixer::AudioMixer() {}

std::unique_ptr<StreamInfo> AudioMixer::process(
    std::unique_ptr<StreamInfo> info) {
  std::scoped_lock lock(this->accessMutex);
  if (info->numChannels != from) {
    throw std::runtime_error(
        "AudioMixer: Input channel count does not match configuration");
  }
  info->numChannels = to;

  for (auto& singleConf : mixerConfig) {
    if (singleConf.source.size() == 1) {
      if (singleConf.source[0] == singleConf.destination) {
        continue;
      }
      // Copy channel
      for (int i = 0; i < info->numSamples; i++) {
        info->data[singleConf.destination][i] =
            info->data[singleConf.source[0]][i];
      }
    } else {
      // Mix channels
      float sample = 0.0f;
      for (int i = 0; i < info->numSamples; i++) {
        sample = 0.0;
        for (auto& source : singleConf.source) {
          sample += info->data[source][i];
        }

        info->data[singleConf.destination][i] =
            sample / (float)singleConf.source.size();
      }
    }
  }

  return info;
}
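The mixing branch above averages every configured source channel into the destination channel, sample by sample. The same arithmetic in isolation, as a small standalone sketch (hypothetical plain float buffers, not the bell::StreamInfo type):

```cpp
#include <cstdio>
#include <vector>

// Average a set of source channels into one destination buffer,
// mirroring the per-sample loop in AudioMixer::process.
static std::vector<float> downmix(const std::vector<std::vector<float>>& sources) {
    std::vector<float> out(sources.empty() ? 0 : sources[0].size(), 0.0f);
    for (size_t i = 0; i < out.size(); i++) {
        float sample = 0.0f;
        for (const auto& src : sources) {
            sample += src[i];
        }
        out[i] = sample / static_cast<float>(sources.size());
    }
    return out;
}

int main() {
    auto mixed = downmix({{1.0f, 0.0f}, {0.0f, 1.0f}});
    std::printf("%.2f %.2f\n", mixed[0], mixed[1]);  // 0.50 0.50
    return 0;
}
```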
@@ -1,47 +1,53 @@
#include "AudioPipeline.h"

#include <type_traits>  // for remove_extent_t
#include <utility>      // for move

#include "AudioTransform.h"   // for AudioTransform
#include "BellLogger.h"       // for AbstractLogger, BELL_LOG
#include "TransformConfig.h"  // for TransformConfig

using namespace bell;

AudioPipeline::AudioPipeline(){
    // this->headroomGainTransform = std::make_shared<Gain>(Channels::LEFT_RIGHT);
    // this->transforms.push_back(this->headroomGainTransform);
};

void AudioPipeline::addTransform(std::shared_ptr<AudioTransform> transform) {
  transforms.push_back(transform);
  recalculateHeadroom();
}

void AudioPipeline::recalculateHeadroom() {
  float headroom = 0.0f;

  // Find largest headroom required by any transform down the chain, and apply it
  for (auto transform : transforms) {
    if (headroom < transform->calculateHeadroom()) {
      headroom = transform->calculateHeadroom();
    }
  }

  // headroomGainTransform->configure(-headroom);
}

void AudioPipeline::volumeUpdated(int volume) {
  BELL_LOG(debug, "AudioPipeline", "Requested");
  std::scoped_lock lock(this->accessMutex);
  for (auto transform : transforms) {
    transform->config->currentVolume = volume;
    transform->reconfigure();
  }
  BELL_LOG(debug, "AudioPipeline", "Volume applied, DSP reconfigured");
}

std::unique_ptr<StreamInfo> AudioPipeline::process(
    std::unique_ptr<StreamInfo> data) {
  std::scoped_lock lock(this->accessMutex);
  for (auto& transform : transforms) {
    data = transform->process(std::move(data));
  }

  return data;
}
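recalculateHeadroom above keeps the largest headroom requested by any transform in the chain, which the pipeline would then compensate with a negative gain. The same idea in isolation (hypothetical Transform type, not the bell::AudioTransform interface):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical transform descriptor: only the headroom it asks for matters here.
struct Transform {
    float headroomDb;
};

// Largest headroom wins; the pipeline would later apply -headroom as a gain.
static float requiredHeadroom(const std::vector<Transform>& transforms) {
    float headroom = 0.0f;
    for (const auto& t : transforms) {
        headroom = std::max(headroom, t.headroomDb);
    }
    return headroom;
}

int main() {
    std::printf("%.1f dB\n", requiredHeadroom({{3.0f}, {6.0f}, {1.5f}}));  // 6.0 dB
    return 0;
}
```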
@@ -1,6 +1,10 @@
#include "BellDSP.h"
#include <iostream>
#include "CentralAudioBuffer.h"

#include <type_traits>  // for remove_extent_t
#include <utility>      // for move

#include "AudioPipeline.h"       // for CentralAudioBuffer
#include "CentralAudioBuffer.h"  // for CentralAudioBuffer

using namespace bell;

||||
@@ -1,466 +1,439 @@
|
||||
#include "Biquad.h"
|
||||
|
||||
#include <cmath> // for pow, cosf, sinf, M_PI, sqrtf, tanf, logf, sinh
|
||||
|
||||
using namespace bell;
|
||||
|
||||
Biquad::Biquad()
|
||||
{
|
||||
this->filterType = "biquad";
|
||||
Biquad::Biquad() {
|
||||
this->filterType = "biquad";
|
||||
}
|
||||
|
||||
void Biquad::sampleRateChanged(uint32_t sampleRate)
|
||||
{
|
||||
this->sampleRate = sampleRate;
|
||||
//this->configure(this->type, this->currentConfig);
|
||||
void Biquad::sampleRateChanged(uint32_t sampleRate) {
|
||||
this->sampleRate = sampleRate;
|
||||
//this->configure(this->type, this->currentConfig);
|
||||
}
|
||||
|
||||
void Biquad::configure(Type type, std::map<std::string, float> &newConf)
|
||||
{
|
||||
this->type = type;
|
||||
this->currentConfig = newConf;
|
||||
void Biquad::configure(Type type, std::map<std::string, float>& newConf) {
|
||||
this->type = type;
|
||||
this->currentConfig = newConf;
|
||||
|
||||
switch (type)
|
||||
{
|
||||
switch (type) {
|
||||
case Type::Free:
|
||||
coeffs[0] = newConf["a1"];
|
||||
coeffs[1] = newConf["a2"];
|
||||
coeffs[2] = newConf["b0"];
|
||||
coeffs[3] = newConf["b1"];
|
||||
coeffs[4] = newConf["b2"];
|
||||
break;
|
||||
coeffs[0] = newConf["a1"];
|
||||
coeffs[1] = newConf["a2"];
|
||||
coeffs[2] = newConf["b0"];
|
||||
coeffs[3] = newConf["b1"];
|
||||
coeffs[4] = newConf["b2"];
|
||||
break;
|
||||
case Type::Highpass:
|
||||
highPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
break;
|
||||
highPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
break;
|
||||
case Type::HighpassFO:
|
||||
highPassFOCoEffs(newConf["freq"]);
|
||||
break;
|
||||
highPassFOCoEffs(newConf["freq"]);
|
||||
break;
|
||||
case Type::Lowpass:
|
||||
lowPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
break;
|
||||
lowPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
break;
|
||||
case Type::LowpassFO:
|
||||
lowPassFOCoEffs(newConf["freq"]);
|
||||
break;
|
||||
lowPassFOCoEffs(newConf["freq"]);
|
||||
break;
|
||||
case Type::Highshelf:
|
||||
// check if config has slope key
|
||||
if (newConf.find("slope") != newConf.end())
|
||||
{
|
||||
highShelfCoEffsSlope(newConf["freq"], newConf["gain"], newConf["slope"]);
|
||||
}
|
||||
else
|
||||
{
|
||||
highShelfCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
// check if config has slope key
|
||||
if (newConf.find("slope") != newConf.end()) {
|
||||
highShelfCoEffsSlope(newConf["freq"], newConf["gain"],
|
||||
newConf["slope"]);
|
||||
} else {
|
||||
highShelfCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
case Type::HighshelfFO:
|
||||
highShelfFOCoEffs(newConf["freq"], newConf["gain"]);
|
||||
break;
|
||||
highShelfFOCoEffs(newConf["freq"], newConf["gain"]);
|
||||
break;
|
||||
case Type::Lowshelf:
|
||||
// check if config has slope key
|
||||
if (newConf.find("slope") != newConf.end())
|
||||
{
|
||||
lowShelfCoEffsSlope(newConf["freq"], newConf["gain"], newConf["slope"]);
|
||||
}
|
||||
else
|
||||
{
|
||||
lowShelfCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
// check if config has slope key
|
||||
if (newConf.find("slope") != newConf.end()) {
|
||||
lowShelfCoEffsSlope(newConf["freq"], newConf["gain"], newConf["slope"]);
|
||||
} else {
|
||||
lowShelfCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
case Type::LowshelfFO:
|
||||
lowShelfFOCoEffs(newConf["freq"], newConf["gain"]);
|
||||
break;
|
||||
lowShelfFOCoEffs(newConf["freq"], newConf["gain"]);
|
||||
break;
|
||||
case Type::Peaking:
|
||||
// check if config has bandwidth key
|
||||
if (newConf.find("bandwidth") != newConf.end())
|
||||
{
|
||||
peakCoEffsBandwidth(newConf["freq"], newConf["gain"], newConf["bandwidth"]);
|
||||
}
|
||||
else
|
||||
{
|
||||
peakCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
// check if config has bandwidth key
|
||||
if (newConf.find("bandwidth") != newConf.end()) {
|
||||
peakCoEffsBandwidth(newConf["freq"], newConf["gain"],
|
||||
newConf["bandwidth"]);
|
||||
} else {
|
||||
peakCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
case Type::Notch:
|
||||
if (newConf.find("bandwidth") != newConf.end())
|
||||
{
|
||||
notchCoEffsBandwidth(newConf["freq"], newConf["gain"], newConf["bandwidth"]);
|
||||
}
|
||||
else
|
||||
{
|
||||
notchCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
if (newConf.find("bandwidth") != newConf.end()) {
|
||||
notchCoEffsBandwidth(newConf["freq"], newConf["gain"],
|
||||
newConf["bandwidth"]);
|
||||
} else {
|
||||
notchCoEffs(newConf["freq"], newConf["gain"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
case Type::Bandpass:
|
||||
if (newConf.find("bandwidth") != newConf.end())
|
||||
{
|
||||
bandPassCoEffsBandwidth(newConf["freq"], newConf["bandwidth"]);
|
||||
}
|
||||
else
|
||||
{
|
||||
bandPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
if (newConf.find("bandwidth") != newConf.end()) {
|
||||
bandPassCoEffsBandwidth(newConf["freq"], newConf["bandwidth"]);
|
||||
} else {
|
||||
bandPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
case Type::Allpass:
|
||||
if (newConf.find("bandwidth") != newConf.end())
|
||||
{
|
||||
allPassCoEffsBandwidth(newConf["freq"], newConf["bandwidth"]);
|
||||
}
|
||||
else
|
||||
{
|
||||
allPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
if (newConf.find("bandwidth") != newConf.end()) {
|
||||
allPassCoEffsBandwidth(newConf["freq"], newConf["bandwidth"]);
|
||||
} else {
|
||||
allPassCoEffs(newConf["freq"], newConf["q"]);
|
||||
}
|
||||
break;
|
||||
case Type::AllpassFO:
|
||||
allPassFOCoEffs(newConf["freq"]);
|
||||
break;
|
||||
}
|
||||
allPassFOCoEffs(newConf["freq"]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// coefficients for a high pass biquad filter
|
||||
void Biquad::highPassCoEffs(float f, float q)
|
||||
{
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
void Biquad::highPassCoEffs(float f, float q) {
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
|
||||
float b0 = (1 + c) / 2;
|
||||
float b1 = -(1 + c);
|
||||
float b2 = b0;
|
||||
float a0 = 1 + alpha;
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - alpha;
|
||||
float b0 = (1 + c) / 2;
|
||||
float b1 = -(1 + c);
|
||||
float b2 = b0;
|
||||
float a0 = 1 + alpha;
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - alpha;
|
||||
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
}
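highPassCoEffs above computes the standard RBJ high-pass coefficients and hands them to normalizeCoEffs, which divides through by a0. Once normalized, a biquad is typically applied as a Direct Form I difference equation; a minimal sketch of that step, independent of the Biquad class (BiquadState and its pass-through test values are hypothetical):

```cpp
#include <cstdio>

// Direct Form I biquad: y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2]
//                              - a1*y[n-1] - a2*y[n-2]
// Coefficients are assumed already normalized so that a0 == 1.
struct BiquadState {
    float b0, b1, b2, a1, a2;
    float x1 = 0, x2 = 0, y1 = 0, y2 = 0;

    float process(float x) {
        float y = b0 * x + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2;
        x2 = x1; x1 = x;
        y2 = y1; y1 = y;
        return y;
    }
};

int main() {
    // Pass-through coefficients, just to exercise the state updates.
    BiquadState bq{1.0f, 0.0f, 0.0f, 0.0f, 0.0f};
    std::printf("%.1f\n", bq.process(0.5f));  // 0.5
    return 0;
}
```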
|
||||
|
||||
// coefficients for a high pass first order biquad filter
|
||||
void Biquad::highPassFOCoEffs(float f)
|
||||
{
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float k = tanf(w0 / 2.0);
|
||||
void Biquad::highPassFOCoEffs(float f) {
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float k = tanf(w0 / 2.0);
|
||||
|
||||
float alpha = 1.0 + k;
|
||||
float alpha = 1.0 + k;
|
||||
|
||||
float b0 = 1.0 / alpha;
|
||||
float b1 = -1.0 / alpha;
|
||||
float b2 = 0.0;
|
||||
float a0 = 1.0;
|
||||
float a1 = -(1.0 - k) / alpha;
|
||||
float a2 = 0.0;
|
||||
float b0 = 1.0 / alpha;
|
||||
float b1 = -1.0 / alpha;
|
||||
float b2 = 0.0;
|
||||
float a0 = 1.0;
|
||||
float a1 = -(1.0 - k) / alpha;
|
||||
float a2 = 0.0;
|
||||
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
}
|
||||
|
||||
// coefficients for a low pass biquad filter
|
||||
void Biquad::lowPassCoEffs(float f, float q)
|
||||
{
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
void Biquad::lowPassCoEffs(float f, float q) {
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
|
||||
float b0 = (1 - c) / 2;
|
||||
float b1 = 1 - c;
|
||||
float b2 = b0;
|
||||
float a0 = 1 + alpha;
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - alpha;
|
||||
float b0 = (1 - c) / 2;
|
||||
float b1 = 1 - c;
|
||||
float b2 = b0;
|
||||
float a0 = 1 + alpha;
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - alpha;
|
||||
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
}
|
||||
|
||||
// coefficients for a low pass first order biquad filter
|
||||
void Biquad::lowPassFOCoEffs(float f)
|
||||
{
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float k = tanf(w0 / 2.0);
|
||||
void Biquad::lowPassFOCoEffs(float f) {
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float k = tanf(w0 / 2.0);
|
||||
|
||||
float alpha = 1.0 + k;
|
||||
float alpha = 1.0 + k;
|
||||
|
||||
float b0 = k / alpha;
|
||||
float b1 = k / alpha;
|
||||
float b2 = 0.0;
|
||||
float a0 = 1.0;
|
||||
float a1 = -(1.0 - k) / alpha;
|
||||
float a2 = 0.0;
|
||||
float b0 = k / alpha;
|
||||
float b1 = k / alpha;
|
||||
float b2 = 0.0;
|
||||
float a0 = 1.0;
|
||||
float a1 = -(1.0 - k) / alpha;
|
||||
float a2 = 0.0;
|
||||
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
}
|
||||
|
||||
// coefficients for a peak biquad filter
|
||||
void Biquad::peakCoEffs(float f, float gain, float q)
|
||||
{
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
void Biquad::peakCoEffs(float f, float gain, float q) {
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
|
||||
float ampl = std::pow(10.0f, gain / 40.0f);
|
||||
float b0 = 1.0 + (alpha * ampl);
|
||||
float b1 = -2.0 * c;
|
||||
float b2 = 1.0 - (alpha * ampl);
|
||||
float a0 = 1 + (alpha / ampl);
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - (alpha / ampl);
|
||||
float ampl = std::pow(10.0f, gain / 40.0f);
|
||||
float b0 = 1.0 + (alpha * ampl);
|
||||
float b1 = -2.0 * c;
|
||||
float b2 = 1.0 - (alpha * ampl);
|
||||
float a0 = 1 + (alpha / ampl);
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - (alpha / ampl);
|
||||
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
}
|
||||
void Biquad::peakCoEffsBandwidth(float f, float gain, float bandwidth)
|
||||
{
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s * sinh(logf(2.0) / 2.0 * bandwidth * w0 / s);
|
||||
void Biquad::peakCoEffsBandwidth(float f, float gain, float bandwidth) {
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s * sinh(logf(2.0) / 2.0 * bandwidth * w0 / s);
|
||||
|
||||
float ampl = std::pow(10.0f, gain / 40.0f);
|
||||
float b0 = 1.0 + (alpha * ampl);
|
||||
float b1 = -2.0 * c;
|
||||
float b2 = 1.0 - (alpha * ampl);
|
||||
float a0 = 1 + (alpha / ampl);
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - (alpha / ampl);
|
||||
float ampl = std::pow(10.0f, gain / 40.0f);
|
||||
float b0 = 1.0 + (alpha * ampl);
|
||||
float b1 = -2.0 * c;
|
||||
float b2 = 1.0 - (alpha * ampl);
|
||||
float a0 = 1 + (alpha / ampl);
|
||||
float a1 = -2 * c;
|
||||
float a2 = 1 - (alpha / ampl);
|
||||
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
|
||||
}
|
||||
|
||||
void Biquad::highShelfCoEffs(float f, float gain, float q)
|
||||
{
|
||||
float A = std::pow(10.0f, gain / 40.0f);
|
||||
float w0 = 2 * M_PI * f / this->sampleRate;
|
||||
float c = cosf(w0);
|
||||
float s = sinf(w0);
|
||||
float alpha = s / (2 * q);
|
||||
float beta = s * sqrtf(A) / q;
|
||||
float b0 = A * ((A + 1.0) + (A - 1.0) * c + beta);
|
||||
  float b1 = -2.0 * A * ((A - 1.0) + (A + 1.0) * c);
  float b2 = A * ((A + 1.0) + (A - 1.0) * c - beta);
  float a0 = (A + 1.0) - (A - 1.0) * c + beta;
  float a1 = 2.0 * ((A - 1.0) - (A + 1.0) * c);
  float a2 = (A + 1.0) - (A - 1.0) * c - beta;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::highShelfCoEffsSlope(float f, float gain, float slope) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha =
      s / 2.0 * sqrtf((A + 1.0 / A) * (1.0 / (slope / 12.0) - 1.0) + 2.0);
  float beta = 2.0 * sqrtf(A) * alpha;

  float b0 = A * ((A + 1.0) + (A - 1.0) * c + beta);
  float b1 = -2.0 * A * ((A - 1.0) + (A + 1.0) * c);
  float b2 = A * ((A + 1.0) + (A - 1.0) * c - beta);
  float a0 = (A + 1.0) - (A - 1.0) * c + beta;
  float a1 = 2.0 * ((A - 1.0) - (A + 1.0) * c);
  float a2 = (A + 1.0) - (A - 1.0) * c - beta;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::highShelfFOCoEffs(float f, float gain) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float tn = tanf(w0 / 2.0);

  float b0 = A * tn + std::pow(A, 2);
  float b1 = A * tn - std::pow(A, 2);
  float b2 = 0.0;
  float a0 = A * tn + 1.0;
  float a1 = A * tn - 1.0;
  float a2 = 0.0;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::lowShelfCoEffs(float f, float gain, float q) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float beta = s * sqrtf(A) / q;

  float b0 = A * ((A + 1.0) - (A - 1.0) * c + beta);
  float b1 = 2.0 * A * ((A - 1.0) - (A + 1.0) * c);
  float b2 = A * ((A + 1.0) - (A - 1.0) * c - beta);
  float a0 = (A + 1.0) + (A - 1.0) * c + beta;
  float a1 = -2.0 * ((A - 1.0) + (A + 1.0) * c);
  float a2 = (A + 1.0) + (A - 1.0) * c - beta;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::lowShelfCoEffsSlope(float f, float gain, float slope) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha =
      s / 2.0 * sqrtf((A + 1.0 / A) * (1.0 / (slope / 12.0) - 1.0) + 2.0);
  float beta = 2.0 * sqrtf(A) * alpha;

  float b0 = A * ((A + 1.0) - (A - 1.0) * c + beta);
  float b1 = 2.0 * A * ((A - 1.0) - (A + 1.0) * c);
  float b2 = A * ((A + 1.0) - (A - 1.0) * c - beta);
  float a0 = (A + 1.0) + (A - 1.0) * c + beta;
  float a1 = -2.0 * ((A - 1.0) + (A + 1.0) * c);
  float a2 = (A + 1.0) + (A - 1.0) * c - beta;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::lowShelfFOCoEffs(float f, float gain) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float tn = tanf(w0 / 2.0);

  float b0 = std::pow(A, 2) * tn + A;
  float b1 = std::pow(A, 2) * tn - A;
  float b2 = 0.0;
  float a0 = tn + A;
  float a1 = tn - A;
  float a2 = 0.0;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::notchCoEffs(float f, float gain, float q) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha = s / (2.0 * q);

  float b0 = 1.0;
  float b1 = -2.0 * c;
  float b2 = 1.0;
  float a0 = 1.0 + alpha;
  float a1 = -2.0 * c;
  float a2 = 1.0 - alpha;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::notchCoEffsBandwidth(float f, float gain, float bandwidth) {
  float A = std::pow(10.0f, gain / 40.0f);
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha = s * sinh(logf(2.0) / 2.0 * bandwidth * w0 / s);

  float b0 = 1.0;
  float b1 = -2.0 * c;
  float b2 = 1.0;
  float a0 = 1.0 + alpha;
  float a1 = -2.0 * c;
  float a2 = 1.0 - alpha;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::bandPassCoEffs(float f, float q) {
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha = s / (2.0 * q);

  float b0 = alpha;
  float b1 = 0.0;
  float b2 = -alpha;
  float a0 = 1.0 + alpha;
  float a1 = -2.0 * c;
  float a2 = 1.0 - alpha;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::bandPassCoEffsBandwidth(float f, float bandwidth) {
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha = s * sinh(logf(2.0) / 2.0 * bandwidth * w0 / s);

  float b0 = alpha;
  float b1 = 0.0;
  float b2 = -alpha;
  float a0 = 1.0 + alpha;
  float a1 = -2.0 * c;
  float a2 = 1.0 - alpha;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::allPassCoEffs(float f, float q) {
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha = s / (2.0 * q);

  float b0 = 1.0 - alpha;
  float b1 = -2.0 * c;
  float b2 = 1.0 + alpha;
  float a0 = 1.0 + alpha;
  float a1 = -2.0 * c;
  float a2 = 1.0 - alpha;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::allPassCoEffsBandwidth(float f, float bandwidth) {
  float w0 = 2 * M_PI * f / this->sampleRate;
  float c = cosf(w0);
  float s = sinf(w0);
  float alpha = s * sinh(logf(2.0) / 2.0 * bandwidth * w0 / s);

  float b0 = 1.0 - alpha;
  float b1 = -2.0 * c;
  float b2 = 1.0 + alpha;
  float a0 = 1.0 + alpha;
  float a1 = -2.0 * c;
  float a2 = 1.0 - alpha;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::allPassFOCoEffs(float f) {
  float w0 = 2 * M_PI * f / this->sampleRate;
  float tn = tanf(w0 / 2.0);

  float alpha = (tn + 1.0) / (tn - 1.0);

  float b0 = 1.0;
  float b1 = alpha;
  float b2 = 0.0;
  float a0 = alpha;
  float a1 = 1.0;
  float a2 = 0.0;

  this->normalizeCoEffs(a0, a1, a2, b0, b1, b2);
}

void Biquad::normalizeCoEffs(float a0, float a1, float a2, float b0, float b1,
                             float b2) {
  coeffs[0] = b0 / a0;
  coeffs[1] = b1 / a0;
  coeffs[2] = b2 / a0;
  coeffs[3] = a1 / a0;
  coeffs[4] = a2 / a0;
}

std::unique_ptr<StreamInfo> Biquad::process(
    std::unique_ptr<StreamInfo> stream) {
  std::scoped_lock lock(accessMutex);

  auto input = stream->data[this->channel];
  auto numSamples = stream->numSamples;

#ifdef ESP_PLATFORM
  dsps_biquad_f32_ae32(input, input, numSamples, coeffs, w);
#else
  // Apply the set coefficients
  for (int i = 0; i < numSamples; i++) {
    float d0 = input[i] - coeffs[3] * w[0] - coeffs[4] * w[1];
    input[i] = coeffs[0] * d0 + coeffs[1] * w[0] + coeffs[2] * w[1];
    w[1] = w[0];
    w[0] = d0;
  }
#endif

  return stream;
}
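Editor's note on the filter math above (not part of the diff): the shelf, notch, band-pass and all-pass coefficient formulas appear to follow the familiar RBJ Audio EQ Cookbook forms, and the non-ESP branch of Biquad::process() evaluates the normalized filter as a direct-form II difference equation. With coeffs[] laid out by normalizeCoEffs() as {b0/a0, b1/a0, b2/a0, a1/a0, a2/a0} and w[] holding the two delayed states, each loop iteration computes

  d[n] = x[n] - (a1/a0) * d[n-1] - (a2/a0) * d[n-2]
  y[n] = (b0/a0) * d[n] + (b1/a0) * d[n-1] + (b2/a0) * d[n-2]

which is exactly d0 = input[i] - coeffs[3]*w[0] - coeffs[4]*w[1] followed by input[i] = coeffs[0]*d0 + coeffs[1]*w[0] + coeffs[2]*w[1] in the code above; the ESP32 build delegates the same recursion to dsps_biquad_f32_ae32().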
@@ -1,109 +1,89 @@
#include "BiquadCombo.h"

#include <stdio.h>  // for printf
#include <cmath>    // for sinf, M_PI
#include <utility>  // for move

using namespace bell;

BiquadCombo::BiquadCombo() {}

void BiquadCombo::sampleRateChanged(uint32_t sampleRate) {
  for (auto& biquad : biquads) {
    biquad->sampleRateChanged(sampleRate);
  }
}

std::vector<float> BiquadCombo::calculateBWQ(int order) {
  std::vector<float> qValues;
  for (int n = 0; n < order / 2; n++) {
    float q = 1.0f / (2.0f * sinf(M_PI / order * (((float)n) + 0.5)));
    qValues.push_back(q);
  }

  if (order % 2 > 0) {
    qValues.push_back(-1.0);
  }

  printf("%d\n", qValues.size());

  return qValues;
}

std::vector<float> BiquadCombo::calculateLRQ(int order) {
  auto qValues = calculateBWQ(order / 2);

  if (order % 4 > 0) {
    qValues.pop_back();
    qValues.insert(qValues.end(), qValues.begin(), qValues.end());
    qValues.push_back(0.5);
  } else {
    qValues.insert(qValues.end(), qValues.begin(), qValues.end());
  }

  return qValues;
}

void BiquadCombo::butterworth(float freq, int order, FilterType type) {
  std::vector<float> qValues = calculateBWQ(order);
  for (auto& q : qValues) {}
}

void BiquadCombo::linkwitzRiley(float freq, int order, FilterType type) {
  std::vector<float> qValues = calculateLRQ(order);
  for (auto& q : qValues) {
    auto filter = std::make_unique<Biquad>();
    filter->channel = channel;

    auto config = std::map<std::string, float>();
    config["freq"] = freq;
    config["q"] = q;

    if (q >= 0.0) {
      if (type == FilterType::Highpass) {
        filter->configure(Biquad::Type::Highpass, config);
      } else {
        filter->configure(Biquad::Type::Lowpass, config);
      }
    } else {
      if (type == FilterType::Highpass) {
        filter->configure(Biquad::Type::HighpassFO, config);
      } else {
        filter->configure(Biquad::Type::LowpassFO, config);
      }
    }

    this->biquads.push_back(std::move(filter));
  }
}

std::unique_ptr<StreamInfo> BiquadCombo::process(
    std::unique_ptr<StreamInfo> data) {
  std::scoped_lock lock(this->accessMutex);
  for (auto& transform : this->biquads) {
    data = transform->process(std::move(data));
  }

  return data;
}
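A quick check on calculateBWQ() and calculateLRQ() above (a worked example added by the editor, not from the diff): an order-N Butterworth response is realised as cascaded second-order sections with

  Q_n = 1 / (2 * sin(pi/N * (n + 0.5))), for n = 0 .. N/2 - 1,

plus one first-order section (flagged here with q = -1) when N is odd. For N = 4 this gives Q of roughly 1.3066 and 0.5412; calculateLRQ(4) reuses calculateBWQ(2) and duplicates it, giving {0.7071, 0.7071}, i.e. the two cascaded Butterworth halves of an LR4 crossover, which linkwitzRiley() then instantiates as Biquad high-pass or low-pass sections.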
@@ -1,5 +1,7 @@
#include "Compressor.h"

#include <cstdlib>  // for abs

using namespace bell;

float log2f_approx(float X) {
@@ -19,11 +21,11 @@ float log2f_approx(float X) {

Compressor::Compressor() {}

void Compressor::sumChannels(std::unique_ptr<StreamInfo>& data) {
  tmp.resize(data->numSamples);
  for (int i = 0; i < data->numSamples; i++) {
    float sum = 0.0f;
    for (auto& channel : channels) {
      sum += data->data[channel][i];
    }
    tmp[i] = sum;
@@ -31,7 +33,7 @@ void Compressor::sumChannels(std::unique_ptr<StreamInfo> &data) {
}

void Compressor::calLoudness() {
  for (auto& value : tmp) {
    value = 20 * log10f_fast(std::abs(value) + 1.0e-9f);
    if (value >= lastLoudness) {
      value = attack * lastLoudness + (1.0 - attack) * value;
@@ -44,7 +46,7 @@ void Compressor::calLoudness() {
}

void Compressor::calGain() {
  for (auto& value : tmp) {
    if (value > threshold) {
      value = -(value - threshold) * (factor - 1.0) / factor;
    } else {
@@ -58,9 +60,9 @@ void Compressor::calGain() {
  }
}

void Compressor::applyGain(std::unique_ptr<StreamInfo>& data) {
  for (int i = 0; i < data->numSamples; i++) {
    for (auto& channel : channels) {
      data->data[channel][i] *= tmp[i];
    }
  }
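To make the calGain() formula above concrete (editor's worked example, reading factor as the compression ratio): a loudness value L that exceeds the threshold T receives a gain of

  g = -(L - T) * (factor - 1) / factor  [dB]

so the post-gain level is L + g = T + (L - T) / factor. With T = -20 dB and factor = 4, an L = -8 dB sample gets g = -9 dB and lands at -17 dB: the 12 dB overshoot shrinks to 3 dB, i.e. a 4:1 ratio, and applyGain() multiplies every configured channel by the resulting per-sample factor.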
@@ -1,31 +1,29 @@
#include "Gain.h"

#include <cmath>   // for pow
#include <string>  // for string

using namespace bell;

Gain::Gain() : AudioTransform() {
  this->gainFactor = 1.0f;
  this->filterType = "gain";
}

void Gain::configure(std::vector<int> channels, float gainDB) {
  this->channels = channels;
  this->gainDb = gainDB;
  this->gainFactor = std::pow(10.0f, gainDB / 20.0f);
}

std::unique_ptr<StreamInfo> Gain::process(std::unique_ptr<StreamInfo> data) {
  std::scoped_lock lock(this->accessMutex);
  for (int i = 0; i < data->numSamples; i++) {
    // Apply gain to all channels
    for (auto& channel : channels) {
      data->data[channel][i] *= gainFactor;
    }
  }

  return data;
}
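For orientation (worked numbers, not part of the diff): Gain::configure() turns the decibel setting into the per-sample linear factor gainFactor = 10^(gainDB / 20), so gainDB = -6 yields roughly 0.501 (half amplitude) and gainDB = +6 roughly 1.995.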
@@ -1,89 +1,80 @@
#pragma once

#include <cJSON.h>   // for cJSON_GetObjectItem, cJSON, cJSON_IsArray
#include <stddef.h>  // for NULL
#include <algorithm>  // for find
#include <cstdint>    // for uint8_t
#include <memory>     // for unique_ptr
#include <stdexcept>  // for invalid_argument
#include <vector>     // for vector

#include "AudioTransform.h"  // for AudioTransform
#include "StreamInfo.h"      // for StreamInfo

namespace bell {
class AudioMixer : public bell::AudioTransform {
 public:
  enum DownmixMode { DEFAULT };

  struct MixerConfig {
    std::vector<int> source;
    int destination;
  };

  AudioMixer();
  ~AudioMixer(){};
  // Amount of channels in the input
  int from;

  // Amount of channels in the output
  int to;

  // Configuration of each channels in the mixer
  std::vector<MixerConfig> mixerConfig;

  std::unique_ptr<StreamInfo> process(
      std::unique_ptr<StreamInfo> data) override;

  void reconfigure() override {}

  void fromJSON(cJSON* json) {
    cJSON* mappedChannels = cJSON_GetObjectItem(json, "mapped_channels");

    if (mappedChannels == NULL || !cJSON_IsArray(mappedChannels)) {
      throw std::invalid_argument("Mixer configuration invalid");
    }

    this->mixerConfig = std::vector<MixerConfig>();

    cJSON* iterator = NULL;
    cJSON_ArrayForEach(iterator, mappedChannels) {
      std::vector<int> sources(0);
      cJSON* iteratorNested = NULL;
      cJSON_ArrayForEach(iteratorNested,
                         cJSON_GetObjectItem(iterator, "source")) {
        sources.push_back(iteratorNested->valueint);
      }

      int destination = cJSON_GetObjectItem(iterator, "destination")->valueint;

      this->mixerConfig.push_back(
          MixerConfig{.source = sources, .destination = destination});
    }

    std::vector<uint8_t> sources(0);

    for (auto& config : mixerConfig) {
      for (auto& source : config.source) {
        if (std::find(sources.begin(), sources.end(), source) ==
            sources.end()) {
          sources.push_back(source);
        }
      }
    }

    this->from = sources.size();
    this->to = mixerConfig.size();
  }
};
}  // namespace bell
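As an illustration of the configuration shape that AudioMixer::fromJSON() expects, here is a small sketch (editor's example; the JSON literal and the helper function name are invented, while the "mapped_channels"/"source"/"destination" keys and the cJSON calls come from the header above):

#include <cJSON.h>

#include "AudioMixer.h"

// Example: downmix stereo (source channels 0 and 1) into output channel 0.
static const char* kMixerJson =
    "{\"mapped_channels\":[{\"source\":[0,1],\"destination\":0}]}";

void configureMixerExample(bell::AudioMixer& mixer) {
  cJSON* root = cJSON_Parse(kMixerJson);
  if (root != nullptr) {
    mixer.fromJSON(root);  // afterwards: mixer.from == 2, mixer.to == 1
    cJSON_Delete(root);
  }
}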
@@ -1,28 +1,29 @@
#pragma once

#include <memory>  // for shared_ptr, unique_ptr
#include <mutex>   // for mutex
#include <vector>  // for vector

#include "StreamInfo.h"  // for StreamInfo

namespace bell {
class AudioTransform;
class Gain;

class AudioPipeline {
 private:
  std::shared_ptr<Gain> headroomGainTransform;

 public:
  AudioPipeline();
  ~AudioPipeline(){};

  std::mutex accessMutex;
  std::vector<std::shared_ptr<AudioTransform>> transforms;

  void recalculateHeadroom();
  void addTransform(std::shared_ptr<AudioTransform> transform);
  void volumeUpdated(int volume);
  std::unique_ptr<StreamInfo> process(std::unique_ptr<StreamInfo> data);
};
};  // namespace bell
@@ -1,29 +1,28 @@
#pragma once

#include <memory>
#include <mutex>
#include <thread>

#include "StreamInfo.h"
#include "TransformConfig.h"

namespace bell {
class AudioTransform {
 protected:
  std::mutex accessMutex;

 public:
  virtual std::unique_ptr<StreamInfo> process(
      std::unique_ptr<StreamInfo> data) = 0;
  virtual void sampleRateChanged(uint32_t sampleRate){};
  virtual float calculateHeadroom() { return 0; };

  virtual void reconfigure(){};

  std::string filterType;
  std::unique_ptr<TransformConfig> config;

  AudioTransform() = default;
  virtual ~AudioTransform() = default;
};
};  // namespace bell
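To show how the AudioTransform interface above is meant to be subclassed, a minimal sketch (editor's example; the PolarityInvert class is invented, while the AudioTransform members and the StreamInfo data/numSamples fields it touches come from these headers):

#include "AudioTransform.h"

namespace bell {
// Hypothetical transform that flips the polarity of a single channel in place.
class PolarityInvert : public AudioTransform {
 public:
  int channel = 0;

  PolarityInvert() { this->filterType = "polarity_invert"; }

  std::unique_ptr<StreamInfo> process(
      std::unique_ptr<StreamInfo> data) override {
    std::scoped_lock lock(this->accessMutex);
    for (int i = 0; i < data->numSamples; i++) {
      data->data[channel][i] = -data->data[channel][i];
    }
    return data;
  }
};
}  // namespace bell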
@@ -1,34 +1,43 @@
#pragma once

#include <stddef.h>    // for size_t
#include <stdint.h>    // for uint32_t, uint8_t
#include <functional>  // for function
#include <memory>      // for shared_ptr, unique_ptr
#include <mutex>       // for mutex
#include <vector>      // for vector

#include "StreamInfo.h"  // for BitWidth

namespace bell {
class AudioPipeline;
class CentralAudioBuffer;

#define MAX_INT16 32767

class BellDSP {
 public:
  BellDSP(std::shared_ptr<CentralAudioBuffer> centralAudioBuffer);
  ~BellDSP(){};

  class AudioEffect {
   public:
    AudioEffect() = default;
    ~AudioEffect() = default;
    size_t duration;
    virtual void apply(float* sampleData, size_t samples,
                       size_t relativePosition) = 0;
  };

  class FadeEffect : public AudioEffect {
   private:
    std::function<void()> onFinish;
    bool isFadeIn;

   public:
    FadeEffect(size_t duration, bool isFadeIn,
               std::function<void()> onFinish = nullptr);
    ~FadeEffect(){};

    void apply(float* sampleData, size_t samples, size_t relativePosition);
  };
@@ -38,8 +47,8 @@ class BellDSP {

  std::shared_ptr<AudioPipeline> getActivePipeline();

  size_t process(uint8_t* data, size_t bytes, int channels, uint32_t sampleRate,
                 BitWidth bitWidth);

 private:
  std::shared_ptr<AudioPipeline> activePipeline;
@@ -48,7 +57,6 @@ class BellDSP {
  std::vector<float> dataLeft = std::vector<float>(1024);
  std::vector<float> dataRight = std::vector<float>(1024);

  std::unique_ptr<AudioEffect> underflowEffect = nullptr;
  std::unique_ptr<AudioEffect> startEffect = nullptr;
  std::unique_ptr<AudioEffect> instantEffect = nullptr;
@@ -1,158 +1,158 @@
#pragma once

#include <stdint.h>       // for uint32_t
#include <map>            // for map
#include <memory>         // for unique_ptr, allocator
#include <mutex>          // for scoped_lock
#include <stdexcept>      // for invalid_argument
#include <string>         // for string, operator<, hash, operator==
#include <unordered_map>  // for operator!=, unordered_map, __hash_map_c...
#include <utility>        // for pair
#include <vector>         // for vector

#include "AudioTransform.h"   // for AudioTransform
#include "StreamInfo.h"       // for StreamInfo
#include "TransformConfig.h"  // for TransformConfig

extern "C" int dsps_biquad_f32_ae32(const float* input, float* output, int len,
                                    float* coef, float* w);

namespace bell {
class Biquad : public bell::AudioTransform {
 public:
  Biquad();
  ~Biquad(){};

  enum class Type {
    Free,
    Highpass,
    Lowpass,
    HighpassFO,
    LowpassFO,

    Peaking,
    Highshelf,
    HighshelfFO,
    Lowshelf,
    LowshelfFO,
    Notch,
    Bandpass,
    Allpass,
    AllpassFO
  };

  std::map<std::string, float> currentConfig;

  std::unordered_map<std::string, Type> const strMapType = {
      {"free", Type::Free},
      {"highpass", Type::Highpass},
      {"lowpass", Type::Lowpass},
      {"highpass_fo", Type::HighpassFO},
      {"lowpass_fo", Type::LowpassFO},
      {"peaking", Type::Peaking},
      {"highshelf", Type::Highshelf},
      {"highshelf_fo", Type::HighpassFO},
      {"lowshelf", Type::Lowshelf},
      {"lowshelf_fo", Type::LowpassFO},
      {"notch", Type::Notch},
      {"bandpass", Type::Bandpass},
      {"allpass", Type::Allpass},
      {"allpass_fo", Type::AllpassFO},
  };

  float freq, q, gain;
  int channel;
  Biquad::Type type;

  std::unique_ptr<StreamInfo> process(
      std::unique_ptr<StreamInfo> data) override;

  void configure(Type type, std::map<std::string, float>& config);

  void sampleRateChanged(uint32_t sampleRate) override;

  void reconfigure() override {
    std::scoped_lock lock(this->accessMutex);
    std::map<std::string, float> biquadConfig;
    this->channel = config->getChannels()[0];

    float invalid = -0x7C;

    auto type = config->getString("biquad_type");
    float bandwidth = config->getFloat("bandwidth", false, invalid);
    float slope = config->getFloat("slope", false, invalid);
    float gain = config->getFloat("gain", false, invalid);
    float frequency = config->getFloat("frequency", false, invalid);
    float q = config->getFloat("q", false, invalid);

    if (currentConfig["bandwidth"] == bandwidth &&
        currentConfig["slope"] == slope && currentConfig["gain"] == gain &&
        currentConfig["frequency"] == frequency && currentConfig["q"] == q) {
      return;
    }

    if (bandwidth != invalid)
      biquadConfig["bandwidth"] = bandwidth;
    if (slope != invalid)
      biquadConfig["slope"] = slope;
    if (gain != invalid)
      biquadConfig["gain"] = gain;
    if (frequency != invalid)
      biquadConfig["freq"] = frequency;
    if (q != invalid)
      biquadConfig["q"] = q;

    if (type == "free") {
      biquadConfig["a1"] = config->getFloat("a1");
      biquadConfig["a2"] = config->getFloat("a2");
      biquadConfig["b0"] = config->getFloat("b0");
      biquadConfig["b1"] = config->getFloat("b1");
      biquadConfig["b2"] = config->getFloat("b2");
    }

    auto typeElement = strMapType.find(type);
    if (typeElement != strMapType.end()) {
      this->configure(typeElement->second, biquadConfig);
    } else {
      throw std::invalid_argument("No biquad of type " + type);
    }
  }

 private:
  float coeffs[5];
  float w[2] = {1.0, 1.0};

  float sampleRate = 44100;

  // Generator methods for different filter types
  void highPassCoEffs(float f, float q);
  void highPassFOCoEffs(float f);
  void lowPassCoEffs(float f, float q);
  void lowPassFOCoEffs(float f);

  void peakCoEffs(float f, float gain, float q);
  void peakCoEffsBandwidth(float f, float gain, float bandwidth);

  void highShelfCoEffs(float f, float gain, float q);
  void highShelfCoEffsSlope(float f, float gain, float slope);
  void highShelfFOCoEffs(float f, float gain);

  void lowShelfCoEffs(float f, float gain, float q);
  void lowShelfCoEffsSlope(float f, float gain, float slope);
  void lowShelfFOCoEffs(float f, float gain);

  void notchCoEffs(float f, float gain, float q);
  void notchCoEffsBandwidth(float f, float gain, float bandwidth);

  void bandPassCoEffs(float f, float q);
  void bandPassCoEffsBandwidth(float f, float bandwidth);

  void allPassCoEffs(float f, float q);
  void allPassCoEffsBandwidth(float f, float bandwidth);
  void allPassFOCoEffs(float f);

  void normalizeCoEffs(float a0, float a1, float a2, float b0, float b1,
                       float b2);
};

}  // namespace bell
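Finally, a minimal usage sketch for the Biquad class as declared above (editor's example; biquadExample and the literal values are invented, while configure()/process(), the channel member and the "freq"/"q" keys come from the header and the sources earlier in this diff):

#include <map>
#include <memory>
#include <utility>

#include "Biquad.h"

// Configure a 1 kHz low-pass section on channel 0 and run it over one frame.
std::unique_ptr<bell::StreamInfo> biquadExample(
    std::unique_ptr<bell::StreamInfo> frame) {
  bell::Biquad filter;
  filter.channel = 0;
  filter.sampleRateChanged(44100);

  std::map<std::string, float> cfg;
  cfg["freq"] = 1000.0f;
  cfg["q"] = 0.707f;
  filter.configure(bell::Biquad::Type::Lowpass, cfg);

  return filter.process(std::move(frame));  // filters the channel in place
}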
Some files were not shown because too many files have changed in this diff.