From aa4bcb14d0cbee910331545dd2fc086b58c37170 Mon Sep 17 00:00:00 2001
From: Kshitij Sisodia
Date: Fri, 6 May 2022 09:13:03 +0100
Subject: MLECO-3183: Refactoring application sources

Platform-agnostic application sources are moved into the application API
module with their own independent CMake projects. Changes for MLECO-3080
are also included - they create CMake projects for the individual APIs
(again, platform agnostic) that depend on the common logic. The "joint"
KWS_API has been removed; the use case now relies on the individual KWS
and ASR API libraries.

Change-Id: I1f7748dc767abb3904634a04e0991b74ac7b756d
Signed-off-by: Kshitij Sisodia
---
 source/application/api/common/CMakeLists.txt | 59 ++++
 .../application/api/common/include/AudioUtils.hpp | 172 ++++++++++
 .../api/common/include/BaseProcessing.hpp | 67 ++++
 .../api/common/include/ClassificationResult.hpp | 41 +++
 .../application/api/common/include/Classifier.hpp | 89 +++++
 .../api/common/include/DataStructures.hpp | 128 ++++++++
 .../application/api/common/include/ImageUtils.hpp | 116 +++++++
 source/application/api/common/include/Mfcc.hpp | 255 +++++++++++++++
 source/application/api/common/include/Model.hpp | 152 +++++++++
 .../api/common/include/TensorFlowLiteMicro.hpp | 91 ++++++
 source/application/api/common/source/Classifier.cc | 169 ++++++++++
 source/application/api/common/source/ImageUtils.cc | 126 ++++++++
 source/application/api/common/source/Mfcc.cc | 353 ++++++++++++++++++++
 source/application/api/common/source/Model.cc | 359 +++++++++++++++++++++
 .../api/common/source/TensorFlowLiteMicro.cc | 46 +++
 15 files changed, 2223 insertions(+)
 create mode 100644 source/application/api/common/CMakeLists.txt
 create mode 100644 source/application/api/common/include/AudioUtils.hpp
 create mode 100644 source/application/api/common/include/BaseProcessing.hpp
 create mode 100644 source/application/api/common/include/ClassificationResult.hpp
 create mode 100644 source/application/api/common/include/Classifier.hpp
 create mode 100644 source/application/api/common/include/DataStructures.hpp
 create mode 100644 source/application/api/common/include/ImageUtils.hpp
 create mode 100644 source/application/api/common/include/Mfcc.hpp
 create mode 100644 source/application/api/common/include/Model.hpp
 create mode 100644 source/application/api/common/include/TensorFlowLiteMicro.hpp
 create mode 100644 source/application/api/common/source/Classifier.cc
 create mode 100644 source/application/api/common/source/ImageUtils.cc
 create mode 100644 source/application/api/common/source/Mfcc.cc
 create mode 100644 source/application/api/common/source/Model.cc
 create mode 100644 source/application/api/common/source/TensorFlowLiteMicro.cc

(limited to 'source/application/api/common')

diff --git a/source/application/api/common/CMakeLists.txt b/source/application/api/common/CMakeLists.txt
new file mode 100644
index 0000000..5078adc
--- /dev/null
+++ b/source/application/api/common/CMakeLists.txt
@@ -0,0 +1,59 @@
+#----------------------------------------------------------------------------
+# Copyright (c) 2022 Arm Limited. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#---------------------------------------------------------------------------- + +######################################################### +# Common utility library used by use case libraries. # +# NOTE: this library should not depend on HAL. # +######################################################### + +cmake_minimum_required(VERSION 3.15.6) + +set(COMMON_UC_UTILS_TARGET common_api) +project(${COMMON_UC_UTILS_TARGET} + DESCRIPTION "Common Utilities library" + LANGUAGES CXX) + +# Create static library +add_library(${COMMON_UC_UTILS_TARGET} STATIC) + +## Include directories - public +target_include_directories(${COMMON_UC_UTILS_TARGET} + PUBLIC + include + ${TENSORFLOW_SRC_PATH}/tensorflow/lite/micro/tools/make/downloads/flatbuffers/include) + +## Sources +target_sources(${COMMON_UC_UTILS_TARGET} + PRIVATE + source/Classifier.cc + source/ImageUtils.cc + source/Mfcc.cc + source/Model.cc + source/TensorFlowLiteMicro.cc) + +# Link time library targets: +target_link_libraries(${COMMON_UC_UTILS_TARGET} + PUBLIC + log # Logging functions + arm_math # Math functions + tensorflow-lite-micro) # TensorFlow Lite Micro library + +# Display status: +message(STATUS "*******************************************************") +message(STATUS "Library : " ${COMMON_UC_UTILS_TARGET}) +message(STATUS "CMAKE_SYSTEM_PROCESSOR : " ${CMAKE_SYSTEM_PROCESSOR}) +message(STATUS "*******************************************************") diff --git a/source/application/api/common/include/AudioUtils.hpp b/source/application/api/common/include/AudioUtils.hpp new file mode 100644 index 0000000..cbf7bb7 --- /dev/null +++ b/source/application/api/common/include/AudioUtils.hpp @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef AUDIO_UTILS_HPP +#define AUDIO_UTILS_HPP + +#include +#include + +namespace arm { +namespace app { +namespace audio { + + template + class SlidingWindow { + public: + + /** + * @brief Creates the window slider through the given data. + * + * @param[in] data Pointer to the data to slide through. + * @param[in] dataSize Size in T type elements wise. + * @param[in] windowSize Sliding window size in T type wise elements. + * @param[in] stride Stride size in T type wise elements. + */ + SlidingWindow(T *data, size_t dataSize, + size_t windowSize, size_t stride) { + m_start = data; + m_dataSize = dataSize; + m_size = windowSize; + m_stride = stride; + } + + SlidingWindow() = default; + + ~SlidingWindow() = default; + + /** + * @brief Get the next data window. 
+ * @return Pointer to the next window, if next window is not available nullptr is returned. + */ + virtual T *Next() { + if (HasNext()) { + m_count++; + return m_start + Index() * m_stride; + } else { + return nullptr; + } + } + + /** + * @brief Checks if the next data portion is available. + * @return true if next data portion is available. + */ + virtual bool HasNext() { + return m_size + m_count * m_stride <= m_dataSize; + } + + /** + * @brief Reset the slider to the initial position. + */ + virtual void Reset() { + m_count = 0; + } + + /** + * @brief Resets the slider to the start of the new data. + * New data size MUST be the same as the old one. + * @param[in] newStart Pointer to the new data to slide through. + */ + virtual void Reset(T *newStart) { + m_start = newStart; + Reset(); + } + + /** + * @brief Gets current index of the sliding window. + * @return Current position of the sliding window in number of strides. + */ + size_t Index() { + return m_count == 0? 0: m_count - 1; + } + + /** + * @brief Gets the index from the start of the data where the next window will begin. + * While Index() returns the index of sliding window itself this function + * returns the index of the data element itself. + * @return Index from the start of the data where the next sliding window will begin. + */ + virtual uint32_t NextWindowStartIndex() { + return m_count == 0? 0: ((m_count) * m_stride); + } + + /** + * @brief Go to given sliding window index. + * @param[in] index New position of the sliding window. If index is invalid + * (greater than possible range of strides) then next call to Next() will return nullptr. + */ + void FastForward(size_t index) { + m_count = index; + } + + /** + * @brief Calculates whole number of times the window can stride through the given data. + * @return Maximum number of whole strides. + */ + size_t TotalStrides() { + if (m_size > m_dataSize) { + return 0; + } + return ((m_dataSize - m_size)/m_stride); + } + + + protected: + T *m_start = nullptr; + size_t m_dataSize = 0; + size_t m_size = 0; + size_t m_stride = 0; + size_t m_count = 0; + }; + + /* + * Sliding window that will cover the whole length of the input, even if + * this means the last window is not a full window length. + */ + template + class FractionalSlidingWindow : public SlidingWindow { + public: + using SlidingWindow::SlidingWindow; + + /** + * @brief Checks if the next data portion is available. + * @return true if next data portion is available. + */ + bool HasNext() { + return this->m_count < 1 + this->FractionalTotalStrides() && (this->NextWindowStartIndex() < this->m_dataSize); + } + + /** + * @brief Calculates number of times the window can stride through the given data. + * May not be a whole number. + * @return Number of strides to cover all data. + */ + float FractionalTotalStrides() { + if (this->m_dataSize < this->m_size) { + return 0; + } else { + return ((this->m_dataSize - this->m_size) / static_cast(this->m_stride)); + } + } + }; + + +} /* namespace audio */ +} /* namespace app */ +} /* namespace arm */ + +#endif /* AUDIO_UTILS_HPP */ \ No newline at end of file diff --git a/source/application/api/common/include/BaseProcessing.hpp b/source/application/api/common/include/BaseProcessing.hpp new file mode 100644 index 0000000..a54dd12 --- /dev/null +++ b/source/application/api/common/include/BaseProcessing.hpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. 
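As a quick orientation for readers of this patch, a minimal usage sketch for the SlidingWindow helper added above follows; the 16-bit sample type, 640-sample window and 320-sample stride are illustrative assumptions, not values fixed by the API.

#include "AudioUtils.hpp"

#include <cstdint>
#include <vector>

void ProcessClip(const std::vector<int16_t>& samples)
{
    /* Slide a 640-sample window over the clip, advancing by 320 samples. */
    arm::app::audio::SlidingWindow<const int16_t> slider(
        samples.data(), samples.size(), 640, 320);

    while (slider.HasNext()) {
        const int16_t* window = slider.Next();
        /* Hand this window of 640 contiguous samples to feature extraction. */
        (void)window;
    }
}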
+ * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef BASE_PROCESSING_HPP +#define BASE_PROCESSING_HPP + +#include + +namespace arm { +namespace app { + + /** + * @brief Base class exposing pre-processing API. + * Use cases should provide their own PreProcessing class that inherits from this one. + * All steps required to take raw input data and populate tensors ready for inference + * should be handled. + */ + class BasePreProcess { + + public: + virtual ~BasePreProcess() = default; + + /** + * @brief Should perform pre-processing of 'raw' input data and load it into + * TFLite Micro input tensors ready for inference + * @param[in] input Pointer to the data that pre-processing will work on. + * @param[in] inputSize Size of the input data. + * @return true if successful, false otherwise. + **/ + virtual bool DoPreProcess(const void* input, size_t inputSize) = 0; + }; + + /** + * @brief Base class exposing post-processing API. + * Use cases should provide their own PostProcessing class that inherits from this one. + * All steps required to take inference output and populate results vectors should be handled. + */ + class BasePostProcess { + + public: + virtual ~BasePostProcess() = default; + + /** + * @brief Should perform post-processing of the result of inference then populate + * populate result data for any later use. + * @return true if successful, false otherwise. + **/ + virtual bool DoPostProcess() = 0; + }; + +} /* namespace app */ +} /* namespace arm */ + +#endif /* BASE_PROCESSING_HPP */ \ No newline at end of file diff --git a/source/application/api/common/include/ClassificationResult.hpp b/source/application/api/common/include/ClassificationResult.hpp new file mode 100644 index 0000000..eae28e4 --- /dev/null +++ b/source/application/api/common/include/ClassificationResult.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef CLASSIFICATION_RESULT_HPP +#define CLASSIFICATION_RESULT_HPP + +#include + +namespace arm { +namespace app { + + /** + * @brief Class representing a single classification result. 
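To illustrate how a use case might implement the pre-processing interface introduced above, here is a hedged sketch that simply copies raw input into a model input tensor; the class name and copy logic are hypothetical, not part of this patch.

#include "BaseProcessing.hpp"
#include "TensorFlowLiteMicro.hpp"

#include <cstring>

class ExamplePreProcess : public arm::app::BasePreProcess {
public:
    explicit ExamplePreProcess(TfLiteTensor* inputTensor) :
        m_inputTensor(inputTensor) {}

    bool DoPreProcess(const void* input, size_t inputSize) override
    {
        if (input == nullptr || inputSize > m_inputTensor->bytes) {
            return false;
        }
        /* In this toy example the raw data already matches the tensor layout. */
        std::memcpy(m_inputTensor->data.data, input, inputSize);
        return true;
    }

private:
    TfLiteTensor* m_inputTensor;
};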
+ */ + class ClassificationResult { + public: + double m_normalisedVal = 0.0; + std::string m_label; + uint32_t m_labelIdx = 0; + + ClassificationResult() = default; + ~ClassificationResult() = default; + }; + +} /* namespace app */ +} /* namespace arm */ + +#endif /* CLASSIFICATION_RESULT_HPP */ \ No newline at end of file diff --git a/source/application/api/common/include/Classifier.hpp b/source/application/api/common/include/Classifier.hpp new file mode 100644 index 0000000..d641c22 --- /dev/null +++ b/source/application/api/common/include/Classifier.hpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef CLASSIFIER_HPP +#define CLASSIFIER_HPP + +#include "ClassificationResult.hpp" +#include "TensorFlowLiteMicro.hpp" + +#include + +namespace arm { +namespace app { + + /** + * @brief Classifier - a helper class to get certain number of top + * results from the output vector from a classification NN. + **/ + class Classifier{ + public: + /** @brief Constructor. */ + Classifier() = default; + + /** + * @brief Gets the top N classification results from the + * output vector. + * @param[in] outputTensor Inference output tensor from an NN model. + * @param[out] vecResults A vector of classification results. + * populated by this function. + * @param[in] labels Labels vector to match classified classes. + * @param[in] topNCount Number of top classifications to pick. Default is 1. + * @param[in] useSoftmax Whether Softmax normalisation should be applied to output. Default is false. + * @return true if successful, false otherwise. + **/ + + virtual bool GetClassificationResults( + TfLiteTensor* outputTensor, + std::vector& vecResults, + const std::vector & labels, uint32_t topNCount, + bool use_softmax); + + /** + * @brief Populate the elements of the Classification Result object. + * @param[in] topNSet Ordered set of top 5 output class scores and labels. + * @param[out] vecResults A vector of classification results. + * populated by this function. + * @param[in] labels Labels vector to match classified classes. + **/ + + void SetVectorResults( + std::set>& topNSet, + std::vector& vecResults, + const std::vector & labels); + + private: + /** + * @brief Utility function that gets the top N classification results from the + * output vector. + * @param[in] tensor Inference output tensor from an NN model. + * @param[out] vecResults A vector of classification results + * populated by this function. + * @param[in] topNCount Number of top classifications to pick. + * @param[in] labels Labels vector to match classified classes. + * @return true if successful, false otherwise. 
+ **/ + + bool GetTopNResults(const std::vector& tensor, + std::vector& vecResults, + uint32_t topNCount, + const std::vector & labels); + }; + +} /* namespace app */ +} /* namespace arm */ + +#endif /* CLASSIFIER_HPP */ diff --git a/source/application/api/common/include/DataStructures.hpp b/source/application/api/common/include/DataStructures.hpp new file mode 100644 index 0000000..0616839 --- /dev/null +++ b/source/application/api/common/include/DataStructures.hpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef DATA_STRUCTURES_HPP +#define DATA_STRUCTURES_HPP + +#include + +namespace arm { +namespace app { + + /** + * Class Array2d is a data structure that represents a two dimensional array. + * The data is allocated in contiguous memory, arranged row-wise + * and individual elements can be accessed with the () operator. + * For example a two dimensional array D of size (M, N) can be accessed: + * + * _|<------------- col size = N -------->| + * | D(r=0, c=0) D(r=0, c=1)... D(r=0, c=N) + * | D(r=1, c=0) D(r=1, c=1)... D(r=1, c=N) + * | ... + * row size = M ... + * | ... + * _ D(r=M, c=0) D(r=M, c=1)... D(r=M, c=N) + * + */ + template + class Array2d { + public: + /** + * @brief Creates the array2d with the given sizes. + * @param[in] rows Number of rows. + * @param[in] cols Number of columns. + */ + Array2d(unsigned rows, unsigned cols): m_rows(rows), m_cols(cols) + { + if (rows == 0 || cols == 0) { + printf("Array2d constructor has 0 size.\n"); + m_data = nullptr; + return; + } + m_data = new T[rows * cols]; + } + + ~Array2d() + { + delete[] m_data; + } + + T& operator() (unsigned int row, unsigned int col) + { +#if defined(DEBUG) + if (row >= m_rows || col >= m_cols || m_data == nullptr) { + printf_err("Array2d subscript out of bounds.\n"); + } +#endif /* defined(DEBUG) */ + return m_data[m_cols * row + col]; + } + + T operator() (unsigned int row, unsigned int col) const + { +#if defined(DEBUG) + if (row >= m_rows || col >= m_cols || m_data == nullptr) { + printf_err("const Array2d subscript out of bounds.\n"); + } +#endif /* defined(DEBUG) */ + return m_data[m_cols * row + col]; + } + + /** + * @brief Gets rows number of the current array2d. + * @return Number of rows. + */ + size_t size(size_t dim) + { + switch (dim) + { + case 0: + return m_rows; + case 1: + return m_cols; + default: + return 0; + } + } + + /** + * @brief Gets the array2d total size. + */ + size_t totalSize() + { + return m_rows * m_cols; + } + + /** + * array2d iterator. 
+ */ + using iterator=T*; + using const_iterator=T const*; + + iterator begin() { return m_data; } + iterator end() { return m_data + totalSize(); } + const_iterator begin() const { return m_data; } + const_iterator end() const { return m_data + totalSize(); }; + + private: + size_t m_rows; + size_t m_cols; + T* m_data; + }; + +} /* namespace app */ +} /* namespace arm */ + +#endif /* DATA_STRUCTURES_HPP */ \ No newline at end of file diff --git a/source/application/api/common/include/ImageUtils.hpp b/source/application/api/common/include/ImageUtils.hpp new file mode 100644 index 0000000..a8c7650 --- /dev/null +++ b/source/application/api/common/include/ImageUtils.hpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef IMAGE_UTILS_HPP +#define IMAGE_UTILS_HPP + +#include +#include +#include +#include + +/* Helper macro to convert RGB888 to RGB565 format. */ +#define RGB888_TO_RGB565(R8,G8,B8) ((((R8>>3) & 0x1F) << 11) | \ + (((G8>>2) & 0x3F) << 5) | \ + ((B8>>3) & 0x1F)) + +constexpr uint16_t COLOR_BLACK = 0; +constexpr uint16_t COLOR_GREEN = RGB888_TO_RGB565( 0, 255, 0); // 2016; +constexpr uint16_t COLOR_YELLOW = RGB888_TO_RGB565(255, 255, 0); // 65504; + + +namespace arm { +namespace app { +namespace image { + + /** + * Contains the x,y co-ordinates of a box centre along with the box width and height. + */ + struct Box { + float x; + float y; + float w; + float h; + }; + + struct Detection { + Box bbox; + std::vector prob; + float objectness; + }; + + /** + * @brief Calculate the 1D overlap. + * @param[in] x1Center First center point. + * @param[in] width1 First width. + * @param[in] x2Center Second center point. + * @param[in] width2 Second width. + * @return The overlap between the two lines. + **/ + float Calculate1DOverlap(float x1Center, float width1, float x2Center, float width2); + + /** + * @brief Calculate the intersection between the two given boxes. + * @param[in] box1 First box. + * @param[in] box2 Second box. + * @return The intersection value. + **/ + float CalculateBoxIntersect(Box& box1, Box& box2); + + /** + * @brief Calculate the union between the two given boxes. + * @param[in] box1 First box. + * @param[in] box2 Second box. + * @return The two given boxes union value. + **/ + float CalculateBoxUnion(Box& box1, Box& box2); + + /** + * @brief Calculate the intersection over union between the two given boxes. + * @param[in] box1 First box. + * @param[in] box2 Second box. + * @return The intersection over union value. + **/ + float CalculateBoxIOU(Box& box1, Box& box2); + + /** + * @brief Calculate the Non-Maxima suppression on the given detection boxes. + * @param[in] detections List of Detection boxes. + * @param[in] classes Number of classes. + * @param[in] iouThreshold Intersection over union threshold. 
+ **/ + void CalculateNMS(std::forward_list& detections, int classes, float iouThreshold); + + /** + * @brief Helper function to convert a UINT8 image to INT8 format. + * @param[in,out] data Pointer to the data start. + * @param[in] kMaxImageSize Total number of pixels in the image. + **/ + void ConvertImgToInt8(void* data, size_t kMaxImageSize); + + /** + * @brief Converts RGB image to grayscale. + * @param[in] srcPtr Pointer to RGB source image. + * @param[out] dstPtr Pointer to grayscale destination image. + * @param[in] imgSz Destination image size. + **/ + void RgbToGrayscale(const uint8_t* srcPtr, uint8_t* dstPtr, size_t dstImgSz); + +} /* namespace image */ +} /* namespace app */ +} /* namespace arm */ + +#endif /* IMAGE_UTILS_HPP */ \ No newline at end of file diff --git a/source/application/api/common/include/Mfcc.hpp b/source/application/api/common/include/Mfcc.hpp new file mode 100644 index 0000000..86330ca --- /dev/null +++ b/source/application/api/common/include/Mfcc.hpp @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MFCC_HPP +#define MFCC_HPP + +#include "PlatformMath.hpp" + +#include +#include +#include +#include +#include + +namespace arm { +namespace app { +namespace audio { + + /* MFCC's consolidated parameters. */ + class MfccParams { + public: + float m_samplingFreq; + uint32_t m_numFbankBins; + float m_melLoFreq; + float m_melHiFreq; + uint32_t m_numMfccFeatures; + uint32_t m_frameLen; + uint32_t m_frameLenPadded; + bool m_useHtkMethod; + + /** @brief Constructor */ + MfccParams(float samplingFreq, uint32_t numFbankBins, + float melLoFreq, float melHiFreq, + uint32_t numMfccFeats, uint32_t frameLen, + bool useHtkMethod); + + MfccParams() = delete; + + ~MfccParams() = default; + + /** @brief Log parameters */ + void Log() const; + }; + + /** + * @brief Class for MFCC feature extraction. + * Based on https://github.com/ARM-software/ML-KWS-for-MCU/blob/master/Deployment/Source/MFCC/mfcc.cpp + * This class is designed to be generic and self-sufficient but + * certain calculation routines can be overridden to accommodate + * use-case specific requirements. + */ + class MFCC { + public: + /** + * @brief Constructor + * @param[in] params MFCC parameters + */ + explicit MFCC(const MfccParams& params); + + MFCC() = delete; + + ~MFCC() = default; + + /** + * @brief Extract MFCC features for one single small frame of + * audio data e.g. 640 samples. + * @param[in] audioData Vector of audio samples to calculate + * features for. + * @return Vector of extracted MFCC features. + **/ + std::vector MfccCompute(const std::vector& audioData); + + /** @brief Initialise. */ + void Init(); + + /** + * @brief Extract MFCC features and quantise for one single small + * frame of audio data e.g. 640 samples. + * @param[in] audioData Vector of audio samples to calculate + * features for. + * @param[in] quantScale Quantisation scale. 
+ * @param[in] quantOffset Quantisation offset. + * @return Vector of extracted quantised MFCC features. + **/ + template + std::vector MfccComputeQuant(const std::vector& audioData, + const float quantScale, + const int quantOffset) + { + this->MfccComputePreFeature(audioData); + float minVal = std::numeric_limits::min(); + float maxVal = std::numeric_limits::max(); + + std::vector mfccOut(this->m_params.m_numMfccFeatures); + const size_t numFbankBins = this->m_params.m_numFbankBins; + + /* Take DCT. Uses matrix mul. */ + for (size_t i = 0, j = 0; i < mfccOut.size(); ++i, j += numFbankBins) { + float sum = 0; + for (size_t k = 0; k < numFbankBins; ++k) { + sum += this->m_dctMatrix[j + k] * this->m_melEnergies[k]; + } + /* Quantize to T. */ + sum = std::round((sum / quantScale) + quantOffset); + mfccOut[i] = static_cast(std::min(std::max(sum, minVal), maxVal)); + } + + return mfccOut; + } + + /* Constants */ + static constexpr float ms_logStep = /*logf(6.4)*/ 1.8562979903656 / 27.0; + static constexpr float ms_freqStep = 200.0 / 3; + static constexpr float ms_minLogHz = 1000.0; + static constexpr float ms_minLogMel = ms_minLogHz / ms_freqStep; + + protected: + /** + * @brief Project input frequency to Mel Scale. + * @param[in] freq Input frequency in floating point. + * @param[in] useHTKMethod bool to signal if HTK method is to be + * used for calculation. + * @return Mel transformed frequency in floating point. + **/ + static float MelScale(float freq, + bool useHTKMethod = true); + + /** + * @brief Inverse Mel transform - convert MEL warped frequency + * back to normal frequency. + * @param[in] melFreq Mel frequency in floating point. + * @param[in] useHTKMethod bool to signal if HTK method is to be + * used for calculation. + * @return Real world frequency in floating point. + **/ + static float InverseMelScale(float melFreq, + bool useHTKMethod = true); + + /** + * @brief Populates MEL energies after applying the MEL filter + * bank weights and adding them up to be placed into + * bins, according to the filter bank's first and last + * indices (pre-computed for each filter bank element + * by CreateMelFilterBank function). + * @param[in] fftVec Vector populated with FFT magnitudes. + * @param[in] melFilterBank 2D Vector with filter bank weights. + * @param[in] filterBankFilterFirst Vector containing the first indices of filter bank + * to be used for each bin. + * @param[in] filterBankFilterLast Vector containing the last indices of filter bank + * to be used for each bin. + * @param[out] melEnergies Pre-allocated vector of MEL energies to be + * populated. + * @return true if successful, false otherwise. + */ + virtual bool ApplyMelFilterBank( + std::vector& fftVec, + std::vector>& melFilterBank, + std::vector& filterBankFilterFirst, + std::vector& filterBankFilterLast, + std::vector& melEnergies); + + /** + * @brief Converts the Mel energies for logarithmic scale. + * @param[in,out] melEnergies 1D vector of Mel energies. + **/ + virtual void ConvertToLogarithmicScale(std::vector& melEnergies); + + /** + * @brief Create a matrix used to calculate Discrete Cosine + * Transform. + * @param[in] inputLength Input length of the buffer on which + * DCT will be performed. + * @param[in] coefficientCount Total coefficients per input length. + * @return 1D vector with inputLength x coefficientCount elements + * populated with DCT coefficients. 
+ */ + virtual std::vector CreateDCTMatrix( + int32_t inputLength, + int32_t coefficientCount); + + /** + * @brief Given the low and high Mel values, get the normaliser + * for weights to be applied when populating the filter + * bank. + * @param[in] leftMel Low Mel frequency value. + * @param[in] rightMel High Mel frequency value. + * @param[in] useHTKMethod bool to signal if HTK method is to be + * used for calculation. + * @return Value to use for normalizing. + */ + virtual float GetMelFilterBankNormaliser( + const float& leftMel, + const float& rightMel, + bool useHTKMethod); + + private: + MfccParams m_params; + std::vector m_frame; + std::vector m_buffer; + std::vector m_melEnergies; + std::vector m_windowFunc; + std::vector> m_melFilterBank; + std::vector m_dctMatrix; + std::vector m_filterBankFilterFirst; + std::vector m_filterBankFilterLast; + bool m_filterBankInitialised; + arm::app::math::FftInstance m_fftInstance; + + /** + * @brief Initialises the filter banks and the DCT matrix. **/ + void InitMelFilterBank(); + + /** + * @brief Signals whether the instance of MFCC has had its + * required buffers initialised. + * @return true if initialised, false otherwise. + **/ + bool IsMelFilterBankInited() const; + + /** + * @brief Create mel filter banks for MFCC calculation. + * @return 2D vector of floats. + **/ + std::vector> CreateMelFilterBank(); + + /** + * @brief Computes and populates internal memeber buffers used + * in MFCC feature calculation + * @param[in] audioData 1D vector of 16-bit audio data. + */ + void MfccComputePreFeature(const std::vector& audioData); + + /** @brief Computes the magnitude from an interleaved complex array. */ + void ConvertToPowerSpectrum(); + + }; + +} /* namespace audio */ +} /* namespace app */ +} /* namespace arm */ + +#endif /* MFCC_HPP */ \ No newline at end of file diff --git a/source/application/api/common/include/Model.hpp b/source/application/api/common/include/Model.hpp new file mode 100644 index 0000000..df1b259 --- /dev/null +++ b/source/application/api/common/include/Model.hpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MODEL_HPP +#define MODEL_HPP + +#include "TensorFlowLiteMicro.hpp" + +#include + +namespace arm { +namespace app { + + /** + * @brief NN model class wrapping the underlying TensorFlow-Lite-Micro API. + */ + class Model { + public: + /** @brief Constructor. */ + Model(); + + /** @brief Destructor. */ + ~Model(); + + /** @brief Gets the pointer to the model's input tensor at given input index. */ + TfLiteTensor* GetInputTensor(size_t index) const; + + /** @brief Gets the pointer to the model's output tensor at given output index. */ + TfLiteTensor* GetOutputTensor(size_t index) const; + + /** @brief Gets the model's data type. */ + TfLiteType GetType() const; + + /** @brief Gets the pointer to the model's input shape. 
 */
+        TfLiteIntArray* GetInputShape(size_t index) const;
+
+        /** @brief Gets the pointer to the model's output shape at given output index. */
+        TfLiteIntArray* GetOutputShape(size_t index) const;
+
+        /** @brief Gets the number of input tensors the model has. */
+        size_t GetNumInputs() const;
+
+        /** @brief Gets the number of output tensors the model has. */
+        size_t GetNumOutputs() const;
+
+        /** @brief Logs the tensor information to stdout. */
+        void LogTensorInfo(TfLiteTensor* tensor);
+
+        /** @brief Logs the interpreter information to stdout. */
+        void LogInterpreterInfo();
+
+        /** @brief Initialise the model class object.
+         *  @param[in] tensorArenaAddr  Pointer to the tensor arena buffer.
+         *  @param[in] tensorArenaSize  Size of the tensor arena buffer in bytes.
+         *  @param[in] nnModelAddr      Pointer to the model.
+         *  @param[in] nnModelSize      Size of the model in bytes, if known.
+         *  @param[in] allocator        Optional: a pre-initialised micro allocator pointer,
+         *                              if available. If supplied, this allocator will be used
+         *                              to create the interpreter instance.
+         *  @return true if initialisation succeeds, false otherwise.
+         **/
+        bool Init(uint8_t* tensorArenaAddr,
+                  uint32_t tensorArenaSize,
+                  uint8_t* nnModelAddr,
+                  uint32_t nnModelSize,
+                  tflite::MicroAllocator* allocator = nullptr);
+
+        /**
+         * @brief Gets the allocator pointer for this instance.
+         * @return Pointer to a tflite::MicroAllocator object, if
+         *         available; nullptr otherwise.
+         **/
+        tflite::MicroAllocator* GetAllocator();
+
+        /** @brief Checks if this object has been initialised. */
+        bool IsInited() const;
+
+        /** @brief Checks if the model uses signed data. */
+        bool IsDataSigned() const;
+
+        /** @brief Checks if the model uses the Ethos-U operator. */
+        bool ContainsEthosUOperator() const;
+
+        /** @brief Runs the inference (invokes the interpreter). */
+        virtual bool RunInference();
+
+        /** @brief Model information handler common to all models.
+         *  @return true or false based on execution success.
+         **/
+        bool ShowModelInfoHandler();
+
+        /** @brief Gets a pointer to the tensor arena. */
+        uint8_t* GetTensorArena();
+
+    protected:
+        /** @brief Gets the pointer to the NN model data array.
+         *  @return Pointer of uint8_t type.
+         **/
+        const uint8_t* ModelPointer();
+
+        /** @brief Gets the model size.
+         *  @return uint32_t, size in bytes.
+         **/
+        uint32_t ModelSize();
+
+        /**
+         * @brief Gets the op resolver for the model instance.
+         * @return const reference to a tflite::MicroOpResolver object.
+         **/
+        virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
+
+        /**
+         * @brief Add all the operators required for the given model.
+         *        Implementation of this should come from the use case.
+         * @return true if ops are successfully added, false otherwise.
+         **/
+        virtual bool EnlistOperations() = 0;
+
+        /** @brief Gets the total size of tensor arena available for use. */
+        size_t GetActivationBufferSize();
+
+    private:
+        tflite::ErrorReporter* m_pErrorReporter = nullptr;   /* Pointer to the error reporter. */
+        const tflite::Model* m_pModel = nullptr;             /* Tflite model pointer. */
+        tflite::MicroInterpreter* m_pInterpreter = nullptr;  /* Tflite interpreter. */
+        tflite::MicroAllocator* m_pAllocator = nullptr;      /* Tflite micro allocator. */
+        bool m_inited = false;            /* Indicates whether this object has been initialised. */
+        uint8_t* m_modelAddr = nullptr;   /* Model address */
+        uint32_t m_modelSize = 0;         /* Model size */
+
+        std::vector<TfLiteTensor*> m_input = {};   /* Model's input tensor pointers. */
+        std::vector<TfLiteTensor*> m_output = {};  /* Model's output tensor pointers.
*/ + TfLiteType m_type = kTfLiteNoType;/* Model's data type. */ + }; + +} /* namespace app */ +} /* namespace arm */ + +#endif /* MODEL_HPP */ diff --git a/source/application/api/common/include/TensorFlowLiteMicro.hpp b/source/application/api/common/include/TensorFlowLiteMicro.hpp new file mode 100644 index 0000000..f6639fd --- /dev/null +++ b/source/application/api/common/include/TensorFlowLiteMicro.hpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef TENSORFLOW_LITE_MICRO_LOCAL_HPP +#define TENSORFLOW_LITE_MICRO_LOCAL_HPP + +/* We include all our TensorFlow Lite Micro headers here */ + +/** + * TensorFlow Lite Micro sources can generate a lot of warnings from the usage + * of a single macro (TF_LITE_REMOVE_VIRTUAL_DELETE). Suppress the known ones + * here to prevent them from masking warnings that might be generated by our + * application sources. + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wunused-parameter" + #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" + #include "tensorflow/lite/micro/micro_interpreter.h" + #include "tensorflow/lite/micro/micro_error_reporter.h" + #include "tensorflow/lite/micro/all_ops_resolver.h" + #pragma clang diagnostic pop +#elif defined(__GNUC__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wunused-parameter" + #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" + #include "tensorflow/lite/micro/micro_interpreter.h" + #include "tensorflow/lite/micro/micro_error_reporter.h" + #include "tensorflow/lite/micro/all_ops_resolver.h" + #pragma GCC diagnostic pop +#else + #include "tensorflow/lite/micro/micro_mutable_op_resolver.h" + #include "tensorflow/lite/micro/micro_interpreter.h" + #include "tensorflow/lite/micro/micro_error_reporter.h" + #include "tensorflow/lite/micro/all_ops_resolver.h" +#endif + +#include "tensorflow/lite/c/common.h" +#include "tensorflow/lite/micro/kernels/micro_ops.h" +#include "tensorflow/lite/schema/schema_generated.h" +#include "tensorflow/lite/schema/schema_utils.h" + +#if defined (TESTS) + #include "tensorflow/lite/micro/test_helpers.h" +#endif /* defined (TESTS) */ + +namespace arm { +namespace app { + + /** Struct for quantization parameters. */ + struct QuantParams { + float scale = 1.0; + int offset = 0; + }; + + /** + * @brief Gets the quantization parameters from a tensor + * @param[in] tensor pointer to the tensor. + * @return QuantParams object. + */ + QuantParams GetTensorQuantParams(TfLiteTensor* tensor); + + /** + * @brief String logging functionality expected to be defined + * by TensorFlow Lite Micro's error reporter. + * @param[in] s Pointer to the string. + */ + extern "C" void DebugLog(const char* s); + +} /* namespace app */ +} /* namespace arm */ + +/** + * @brief Prints the tensor flow version in use to stdout. 
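Model above is abstract (GetOpResolver() and EnlistOperations() are pure virtual), so each use case derives from it and registers the operators its network needs. The sketch below is illustrative only: the class name, operator list and arena size are assumptions rather than part of this patch.

#include "Model.hpp"
#include "log_macros.h"

class ExampleModel : public arm::app::Model {
protected:
    const tflite::MicroOpResolver& GetOpResolver() override
    {
        return this->m_opResolver;
    }

    bool EnlistOperations() override
    {
        this->m_opResolver.AddConv2D();
        this->m_opResolver.AddFullyConnected();
        this->m_opResolver.AddSoftmax();
        return true;
    }

private:
    tflite::MicroMutableOpResolver<3> m_opResolver; /* 3 = number of ops enlisted */
};

/* Typical initialisation with a caller-owned tensor arena and model buffer. */
static uint8_t tensorArena[128 * 1024] __attribute__((aligned(16)));

bool InitExampleModel(ExampleModel& model, uint8_t* nnModelAddr, uint32_t nnModelSize)
{
    if (!model.Init(tensorArena, sizeof(tensorArena), nnModelAddr, nnModelSize)) {
        printf_err("Failed to initialise model\n");
        return false;
    }
    return true;
}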
+ */ +void PrintTensorFlowVersion(); + +#endif /* TENSORFLOW_LITE_MICRO_LOCAL_HPP */ diff --git a/source/application/api/common/source/Classifier.cc b/source/application/api/common/source/Classifier.cc new file mode 100644 index 0000000..6fabebe --- /dev/null +++ b/source/application/api/common/source/Classifier.cc @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "Classifier.hpp" + +#include "TensorFlowLiteMicro.hpp" +#include "PlatformMath.hpp" +#include "log_macros.h" + +#include +#include +#include +#include +#include + + +namespace arm { +namespace app { + + void Classifier::SetVectorResults(std::set>& topNSet, + std::vector& vecResults, + const std::vector & labels) + { + + /* Reset the iterator to the largest element - use reverse iterator. */ + + auto topNIter = topNSet.rbegin(); + for (size_t i = 0; i < vecResults.size() && topNIter != topNSet.rend(); ++i, ++topNIter) { + vecResults[i].m_normalisedVal = topNIter->first; + vecResults[i].m_label = labels[topNIter->second]; + vecResults[i].m_labelIdx = topNIter->second; + } + } + + bool Classifier::GetTopNResults(const std::vector& tensor, + std::vector& vecResults, + uint32_t topNCount, + const std::vector & labels) + { + + std::set> sortedSet; + + /* NOTE: inputVec's size verification against labels should be + * checked by the calling/public function. */ + + /* Set initial elements. */ + for (uint32_t i = 0; i < topNCount; ++i) { + sortedSet.insert({tensor[i], i}); + } + + /* Initialise iterator. */ + auto setFwdIter = sortedSet.begin(); + + /* Scan through the rest of elements with compare operations. */ + for (uint32_t i = topNCount; i < labels.size(); ++i) { + if (setFwdIter->first < tensor[i]) { + sortedSet.erase(*setFwdIter); + sortedSet.insert({tensor[i], i}); + setFwdIter = sortedSet.begin(); + } + } + + /* Final results' container. */ + vecResults = std::vector(topNCount); + SetVectorResults(sortedSet, vecResults, labels); + + return true; + } + + bool Classifier::GetClassificationResults( + TfLiteTensor* outputTensor, + std::vector& vecResults, + const std::vector & labels, + uint32_t topNCount, + bool useSoftmax) + { + if (outputTensor == nullptr) { + printf_err("Output vector is null pointer.\n"); + return false; + } + + uint32_t totalOutputSize = 1; + for (int inputDim = 0; inputDim < outputTensor->dims->size; inputDim++) { + totalOutputSize *= outputTensor->dims->data[inputDim]; + } + + /* Sanity checks. 
*/ + if (totalOutputSize < topNCount) { + printf_err("Output vector is smaller than %" PRIu32 "\n", topNCount); + return false; + } else if (totalOutputSize != labels.size()) { + printf_err("Output size doesn't match the labels' size\n"); + return false; + } else if (topNCount == 0) { + printf_err("Top N results cannot be zero\n"); + return false; + } + + bool resultState; + vecResults.clear(); + + /* De-Quantize Output Tensor */ + QuantParams quantParams = GetTensorQuantParams(outputTensor); + + /* Floating point tensor data to be populated + * NOTE: The assumption here is that the output tensor size isn't too + * big and therefore, there's neglibible impact on heap usage. */ + std::vector tensorData(totalOutputSize); + + /* Populate the floating point buffer */ + switch (outputTensor->type) { + case kTfLiteUInt8: { + uint8_t *tensor_buffer = tflite::GetTensorData(outputTensor); + for (size_t i = 0; i < totalOutputSize; ++i) { + tensorData[i] = quantParams.scale * + (static_cast(tensor_buffer[i]) - quantParams.offset); + } + break; + } + case kTfLiteInt8: { + int8_t *tensor_buffer = tflite::GetTensorData(outputTensor); + for (size_t i = 0; i < totalOutputSize; ++i) { + tensorData[i] = quantParams.scale * + (static_cast(tensor_buffer[i]) - quantParams.offset); + } + break; + } + case kTfLiteFloat32: { + float *tensor_buffer = tflite::GetTensorData(outputTensor); + for (size_t i = 0; i < totalOutputSize; ++i) { + tensorData[i] = tensor_buffer[i]; + } + break; + } + default: + printf_err("Tensor type %s not supported by classifier\n", + TfLiteTypeGetName(outputTensor->type)); + return false; + } + + if (useSoftmax) { + math::MathUtils::SoftmaxF32(tensorData); + } + + /* Get the top N results. */ + resultState = GetTopNResults(tensorData, vecResults, topNCount, labels); + + if (!resultState) { + printf_err("Failed to get top N results set\n"); + return false; + } + + return true; + } +} /* namespace app */ +} /* namespace arm */ \ No newline at end of file diff --git a/source/application/api/common/source/ImageUtils.cc b/source/application/api/common/source/ImageUtils.cc new file mode 100644 index 0000000..31b9493 --- /dev/null +++ b/source/application/api/common/source/ImageUtils.cc @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "ImageUtils.hpp" + +#include + +namespace arm { +namespace app { +namespace image { + + float Calculate1DOverlap(float x1Center, float width1, float x2Center, float width2) + { + float left_1 = x1Center - width1/2; + float left_2 = x2Center - width2/2; + float leftest = left_1 > left_2 ? left_1 : left_2; + + float right_1 = x1Center + width1/2; + float right_2 = x2Center + width2/2; + float rightest = right_1 < right_2 ? 
right_1 : right_2; + + return rightest - leftest; + } + + float CalculateBoxIntersect(Box& box1, Box& box2) + { + float width = Calculate1DOverlap(box1.x, box1.w, box2.x, box2.w); + if (width < 0) { + return 0; + } + float height = Calculate1DOverlap(box1.y, box1.h, box2.y, box2.h); + if (height < 0) { + return 0; + } + + float total_area = width*height; + return total_area; + } + + float CalculateBoxUnion(Box& box1, Box& box2) + { + float boxes_intersection = CalculateBoxIntersect(box1, box2); + float boxes_union = box1.w * box1.h + box2.w * box2.h - boxes_intersection; + return boxes_union; + } + + float CalculateBoxIOU(Box& box1, Box& box2) + { + float boxes_intersection = CalculateBoxIntersect(box1, box2); + if (boxes_intersection == 0) { + return 0; + } + + float boxes_union = CalculateBoxUnion(box1, box2); + if (boxes_union == 0) { + return 0; + } + + return boxes_intersection / boxes_union; + } + + void CalculateNMS(std::forward_list& detections, int classes, float iouThreshold) + { + int idxClass{0}; + auto CompareProbs = [idxClass](Detection& prob1, Detection& prob2) { + return prob1.prob[idxClass] > prob2.prob[idxClass]; + }; + + for (idxClass = 0; idxClass < classes; ++idxClass) { + detections.sort(CompareProbs); + + for (auto it=detections.begin(); it != detections.end(); ++it) { + if (it->prob[idxClass] == 0) continue; + for (auto itc=std::next(it, 1); itc != detections.end(); ++itc) { + if (itc->prob[idxClass] == 0) { + continue; + } + if (CalculateBoxIOU(it->bbox, itc->bbox) > iouThreshold) { + itc->prob[idxClass] = 0; + } + } + } + } + } + + void ConvertImgToInt8(void* data, const size_t kMaxImageSize) + { + auto* tmp_req_data = static_cast(data); + auto* tmp_signed_req_data = static_cast(data); + + for (size_t i = 0; i < kMaxImageSize; i++) { + tmp_signed_req_data[i] = (int8_t) ( + (int32_t) (tmp_req_data[i]) - 128); + } + } + + void RgbToGrayscale(const uint8_t* srcPtr, uint8_t* dstPtr, const size_t dstImgSz) + { + const float R = 0.299; + const float G = 0.587; + const float B = 0.114; + for (size_t i = 0; i < dstImgSz; ++i, srcPtr += 3) { + uint32_t int_gray = R * (*srcPtr) + + G * (*(srcPtr + 1)) + + B * (*(srcPtr + 2)); + *dstPtr++ = int_gray <= std::numeric_limits::max() ? + int_gray : std::numeric_limits::max(); + } + } + +} /* namespace image */ +} /* namespace app */ +} /* namespace arm */ \ No newline at end of file diff --git a/source/application/api/common/source/Mfcc.cc b/source/application/api/common/source/Mfcc.cc new file mode 100644 index 0000000..3bf5eb3 --- /dev/null +++ b/source/application/api/common/source/Mfcc.cc @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
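As a quick numeric check of the box-overlap helpers implemented above: two unit squares whose centres are half a width apart intersect in an area of 0.5, giving an intersection-over-union of one third.

void IouExample()
{
    arm::app::image::Box boxA{0.5f, 0.5f, 1.0f, 1.0f};  /* x centre, y centre, width, height */
    arm::app::image::Box boxB{1.0f, 0.5f, 1.0f, 1.0f};

    float intersection = arm::app::image::CalculateBoxIntersect(boxA, boxB); /* 0.5   */
    float iou          = arm::app::image::CalculateBoxIOU(boxA, boxB);       /* 0.333 */
    (void)intersection;
    (void)iou;
}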
+ */ +#include "Mfcc.hpp" +#include "PlatformMath.hpp" +#include "log_macros.h" + +#include +#include + +namespace arm { +namespace app { +namespace audio { + + MfccParams::MfccParams( + const float samplingFreq, + const uint32_t numFbankBins, + const float melLoFreq, + const float melHiFreq, + const uint32_t numMfccFeats, + const uint32_t frameLen, + const bool useHtkMethod): + m_samplingFreq(samplingFreq), + m_numFbankBins(numFbankBins), + m_melLoFreq(melLoFreq), + m_melHiFreq(melHiFreq), + m_numMfccFeatures(numMfccFeats), + m_frameLen(frameLen), + + /* Smallest power of 2 >= frame length. */ + m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))), + m_useHtkMethod(useHtkMethod) + {} + + void MfccParams::Log() const + { + debug("MFCC parameters:\n"); + debug("\t Sampling frequency: %f\n", this->m_samplingFreq); + debug("\t Number of filter banks: %" PRIu32 "\n", this->m_numFbankBins); + debug("\t Mel frequency limit (low): %f\n", this->m_melLoFreq); + debug("\t Mel frequency limit (high): %f\n", this->m_melHiFreq); + debug("\t Number of MFCC features: %" PRIu32 "\n", this->m_numMfccFeatures); + debug("\t Frame length: %" PRIu32 "\n", this->m_frameLen); + debug("\t Padded frame length: %" PRIu32 "\n", this->m_frameLenPadded); + debug("\t Using HTK for Mel scale: %s\n", this->m_useHtkMethod ? "yes" : "no"); + } + + MFCC::MFCC(const MfccParams& params): + m_params(params), + m_filterBankInitialised(false) + { + this->m_buffer = std::vector( + this->m_params.m_frameLenPadded, 0.0); + this->m_frame = std::vector( + this->m_params.m_frameLenPadded, 0.0); + this->m_melEnergies = std::vector( + this->m_params.m_numFbankBins, 0.0); + + this->m_windowFunc = std::vector(this->m_params.m_frameLen); + const auto multiplier = static_cast(2 * M_PI / this->m_params.m_frameLen); + + /* Create window function. */ + for (size_t i = 0; i < this->m_params.m_frameLen; i++) { + this->m_windowFunc[i] = (0.5 - (0.5 * + math::MathUtils::CosineF32(static_cast(i) * multiplier))); + } + + math::MathUtils::FftInitF32(this->m_params.m_frameLenPadded, this->m_fftInstance); + this->m_params.Log(); + } + + void MFCC::Init() + { + this->InitMelFilterBank(); + } + + float MFCC::MelScale(const float freq, const bool useHTKMethod) + { + if (useHTKMethod) { + return 1127.0f * logf (1.0f + freq / 700.0f); + } else { + /* Slaney formula for mel scale. */ + + float mel = freq / ms_freqStep; + + if (freq >= ms_minLogHz) { + mel = ms_minLogMel + logf(freq / ms_minLogHz) / ms_logStep; + } + return mel; + } + } + + float MFCC::InverseMelScale(const float melFreq, const bool useHTKMethod) + { + if (useHTKMethod) { + return 700.0f * (expf (melFreq / 1127.0f) - 1.0f); + } else { + /* Slaney formula for mel scale. 
*/ + float freq = ms_freqStep * melFreq; + + if (melFreq >= ms_minLogMel) { + freq = ms_minLogHz * expf(ms_logStep * (melFreq - ms_minLogMel)); + } + return freq; + } + } + + + bool MFCC::ApplyMelFilterBank( + std::vector& fftVec, + std::vector>& melFilterBank, + std::vector& filterBankFilterFirst, + std::vector& filterBankFilterLast, + std::vector& melEnergies) + { + const size_t numBanks = melEnergies.size(); + + if (numBanks != filterBankFilterFirst.size() || + numBanks != filterBankFilterLast.size()) { + printf_err("unexpected filter bank lengths\n"); + return false; + } + + for (size_t bin = 0; bin < numBanks; ++bin) { + auto filterBankIter = melFilterBank[bin].begin(); + auto end = melFilterBank[bin].end(); + float melEnergy = FLT_MIN; /* Avoid log of zero at later stages */ + const uint32_t firstIndex = filterBankFilterFirst[bin]; + const uint32_t lastIndex = std::min(filterBankFilterLast[bin], fftVec.size() - 1); + + for (uint32_t i = firstIndex; i <= lastIndex && filterBankIter != end; i++) { + float energyRep = math::MathUtils::SqrtF32(fftVec[i]); + melEnergy += (*filterBankIter++ * energyRep); + } + + melEnergies[bin] = melEnergy; + } + + return true; + } + + void MFCC::ConvertToLogarithmicScale(std::vector& melEnergies) + { + for (float& melEnergy : melEnergies) { + melEnergy = logf(melEnergy); + } + } + + void MFCC::ConvertToPowerSpectrum() + { + const uint32_t halfDim = this->m_buffer.size() / 2; + + /* Handle this special case. */ + float firstEnergy = this->m_buffer[0] * this->m_buffer[0]; + float lastEnergy = this->m_buffer[1] * this->m_buffer[1]; + + math::MathUtils::ComplexMagnitudeSquaredF32( + this->m_buffer.data(), + this->m_buffer.size(), + this->m_buffer.data(), + this->m_buffer.size()/2); + + this->m_buffer[0] = firstEnergy; + this->m_buffer[halfDim] = lastEnergy; + } + + std::vector MFCC::CreateDCTMatrix( + const int32_t inputLength, + const int32_t coefficientCount) + { + std::vector dctMatix(inputLength * coefficientCount); + + const float normalizer = math::MathUtils::SqrtF32(2.0f/inputLength); + const float angleIncr = M_PI/inputLength; + float angle = 0; + + for (int32_t k = 0, m = 0; k < coefficientCount; k++, m += inputLength) { + for (int32_t n = 0; n < inputLength; n++) { + dctMatix[m+n] = normalizer * + math::MathUtils::CosineF32((n + 0.5f) * angle); + } + angle += angleIncr; + } + + return dctMatix; + } + + float MFCC::GetMelFilterBankNormaliser( + const float& leftMel, + const float& rightMel, + const bool useHTKMethod) + { + UNUSED(leftMel); + UNUSED(rightMel); + UNUSED(useHTKMethod); + + /* By default, no normalisation => return 1 */ + return 1.f; + } + + void MFCC::InitMelFilterBank() + { + if (!this->IsMelFilterBankInited()) { + this->m_melFilterBank = this->CreateMelFilterBank(); + this->m_dctMatrix = this->CreateDCTMatrix( + this->m_params.m_numFbankBins, + this->m_params.m_numMfccFeatures); + this->m_filterBankInitialised = true; + } + } + + bool MFCC::IsMelFilterBankInited() const + { + return this->m_filterBankInitialised; + } + + void MFCC::MfccComputePreFeature(const std::vector& audioData) + { + this->InitMelFilterBank(); + + /* TensorFlow way of normalizing .wav data to (-1, 1). */ + constexpr float normaliser = 1.0/(1u<<15u); + for (size_t i = 0; i < this->m_params.m_frameLen; i++) { + this->m_frame[i] = static_cast(audioData[i]) * normaliser; + } + + /* Apply window function to input frame. */ + for(size_t i = 0; i < this->m_params.m_frameLen; i++) { + this->m_frame[i] *= this->m_windowFunc[i]; + } + + /* Set remaining frame values to 0. 
*/ + std::fill(this->m_frame.begin() + this->m_params.m_frameLen,this->m_frame.end(), 0); + + /* Compute FFT. */ + math::MathUtils::FftF32(this->m_frame, this->m_buffer, this->m_fftInstance); + + /* Convert to power spectrum. */ + this->ConvertToPowerSpectrum(); + + /* Apply mel filterbanks. */ + if (!this->ApplyMelFilterBank(this->m_buffer, + this->m_melFilterBank, + this->m_filterBankFilterFirst, + this->m_filterBankFilterLast, + this->m_melEnergies)) { + printf_err("Failed to apply MEL filter banks\n"); + } + + /* Convert to logarithmic scale. */ + this->ConvertToLogarithmicScale(this->m_melEnergies); + } + + std::vector MFCC::MfccCompute(const std::vector& audioData) + { + this->MfccComputePreFeature(audioData); + + std::vector mfccOut(this->m_params.m_numMfccFeatures); + + float * ptrMel = this->m_melEnergies.data(); + float * ptrDct = this->m_dctMatrix.data(); + float * ptrMfcc = mfccOut.data(); + + /* Take DCT. Uses matrix mul. */ + for (size_t i = 0, j = 0; i < mfccOut.size(); + ++i, j += this->m_params.m_numFbankBins) { + *ptrMfcc++ = math::MathUtils::DotProductF32( + ptrDct + j, + ptrMel, + this->m_params.m_numFbankBins); + } + return mfccOut; + } + + std::vector> MFCC::CreateMelFilterBank() + { + size_t numFftBins = this->m_params.m_frameLenPadded / 2; + float fftBinWidth = static_cast(this->m_params.m_samplingFreq) / this->m_params.m_frameLenPadded; + + float melLowFreq = MFCC::MelScale(this->m_params.m_melLoFreq, + this->m_params.m_useHtkMethod); + float melHighFreq = MFCC::MelScale(this->m_params.m_melHiFreq, + this->m_params.m_useHtkMethod); + float melFreqDelta = (melHighFreq - melLowFreq) / (this->m_params.m_numFbankBins + 1); + + std::vector thisBin = std::vector(numFftBins); + std::vector> melFilterBank( + this->m_params.m_numFbankBins); + this->m_filterBankFilterFirst = + std::vector(this->m_params.m_numFbankBins); + this->m_filterBankFilterLast = + std::vector(this->m_params.m_numFbankBins); + + for (size_t bin = 0; bin < this->m_params.m_numFbankBins; bin++) { + float leftMel = melLowFreq + bin * melFreqDelta; + float centerMel = melLowFreq + (bin + 1) * melFreqDelta; + float rightMel = melLowFreq + (bin + 2) * melFreqDelta; + + uint32_t firstIndex = 0; + uint32_t lastIndex = 0; + bool firstIndexFound = false; + const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->m_params.m_useHtkMethod); + + for (size_t i = 0; i < numFftBins; i++) { + float freq = (fftBinWidth * i); /* Center freq of this fft bin. */ + float mel = MFCC::MelScale(freq, this->m_params.m_useHtkMethod); + thisBin[i] = 0.0; + + if (mel > leftMel && mel < rightMel) { + float weight; + if (mel <= centerMel) { + weight = (mel - leftMel) / (centerMel - leftMel); + } else { + weight = (rightMel - mel) / (rightMel - centerMel); + } + + thisBin[i] = weight * normaliser; + if (!firstIndexFound) { + firstIndex = i; + firstIndexFound = true; + } + lastIndex = i; + } + } + + this->m_filterBankFilterFirst[bin] = firstIndex; + this->m_filterBankFilterLast[bin] = lastIndex; + + /* Copy the part we care about. */ + for (uint32_t i = firstIndex; i <= lastIndex; i++) { + melFilterBank[bin].push_back(thisBin[i]); + } + } + + return melFilterBank; + } + +} /* namespace audio */ +} /* namespace app */ +} /* namespace arm */ diff --git a/source/application/api/common/source/Model.cc b/source/application/api/common/source/Model.cc new file mode 100644 index 0000000..f1ac91d --- /dev/null +++ b/source/application/api/common/source/Model.cc @@ -0,0 +1,359 @@ +/* + * Copyright (c) 2021 Arm Limited. 
All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "Model.hpp" +#include "log_macros.h" + +#include <cinttypes> + +/* Initialise the model */ +arm::app::Model::~Model() +{ + delete this->m_pInterpreter; + /** + * No clean-up function available for allocator in TensorFlow Lite Micro yet. + **/ +} + +arm::app::Model::Model() : + m_inited (false), + m_type(kTfLiteNoType) +{ + this->m_pErrorReporter = tflite::GetMicroErrorReporter(); +} + +bool arm::app::Model::Init(uint8_t* tensorArenaAddr, + uint32_t tensorArenaSize, + uint8_t* nnModelAddr, + uint32_t nnModelSize, + tflite::MicroAllocator* allocator) +{ + /* Following tf lite micro example: + * Map the model into a usable data structure. This doesn't involve any + * copying or parsing, it's a very lightweight operation. */ + debug("loading model from @ 0x%p\n", nnModelAddr); + debug("model size: %" PRIu32 " bytes.\n", nnModelSize); + + this->m_pModel = ::tflite::GetModel(nnModelAddr); + + if (this->m_pModel->version() != TFLITE_SCHEMA_VERSION) { + this->m_pErrorReporter->Report( + "[ERROR] model's schema version %d is not equal " + "to supported version %d.", + this->m_pModel->version(), TFLITE_SCHEMA_VERSION); + return false; + } + + this->m_modelAddr = nnModelAddr; + this->m_modelSize = nnModelSize; + + /* Pull in only the operation implementations we need. + * This relies on a complete list of all the ops needed by this graph. + * An easier approach is to just use the AllOpsResolver, but this will + * incur some penalty in code space for op implementations that are not + * needed by this graph. + * static ::tflite::ops::micro::AllOpsResolver resolver; */ + /* NOLINTNEXTLINE(runtime-global-variables) */ + debug("loading op resolver\n"); + + this->EnlistOperations(); + + /* Create allocator instance, if it doesn't exist */ + this->m_pAllocator = allocator; + if (!this->m_pAllocator) { + /* Create an allocator instance */ + info("Creating allocator using tensor arena at 0x%p\n", tensorArenaAddr); + + this->m_pAllocator = tflite::MicroAllocator::Create( + tensorArenaAddr, + tensorArenaSize, + this->m_pErrorReporter); + + if (!this->m_pAllocator) { + printf_err("Failed to create allocator\n"); + return false; + } + debug("Created new allocator @ 0x%p\n", this->m_pAllocator); + } else { + debug("Using existing allocator @ 0x%p\n", this->m_pAllocator); + } + + this->m_pInterpreter = new ::tflite::MicroInterpreter( + this->m_pModel, this->GetOpResolver(), + this->m_pAllocator, this->m_pErrorReporter); + + if (!this->m_pInterpreter) { + printf_err("Failed to allocate interpreter\n"); + return false; + } + + /* Allocate memory from the tensor_arena for the model's tensors. 
*/ + info("Allocating tensors\n"); + TfLiteStatus allocate_status = this->m_pInterpreter->AllocateTensors(); + + if (allocate_status != kTfLiteOk) { + printf_err("tensor allocation failed!\n"); + delete this->m_pInterpreter; + return false; + } + + /* Get information about the memory area to use for the model's input. */ + this->m_input.resize(this->GetNumInputs()); + for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) + this->m_input[inIndex] = this->m_pInterpreter->input(inIndex); + + this->m_output.resize(this->GetNumOutputs()); + for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) + this->m_output[outIndex] = this->m_pInterpreter->output(outIndex); + + if (this->m_input.empty() || this->m_output.empty()) { + printf_err("failed to get tensors\n"); + return false; + } else { + this->m_type = this->m_input[0]->type; /* Input 0 should be the main input */ + + /* Clear the input & output tensors */ + for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) { + std::memset(this->m_input[inIndex]->data.data, 0, this->m_input[inIndex]->bytes); + } + for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) { + std::memset(this->m_output[outIndex]->data.data, 0, this->m_output[outIndex]->bytes); + } + + this->LogInterpreterInfo(); + } + + this->m_inited = true; + return true; +} + +tflite::MicroAllocator* arm::app::Model::GetAllocator() +{ + if (this->IsInited()) { + return this->m_pAllocator; + } + return nullptr; +} + +void arm::app::Model::LogTensorInfo(TfLiteTensor* tensor) +{ + if (!tensor) { + printf_err("Invalid tensor\n"); + assert(tensor); + return; + } + + debug("\ttensor is assigned to 0x%p\n", tensor); + info("\ttensor type is %s\n", TfLiteTypeGetName(tensor->type)); + info("\ttensor occupies %zu bytes with dimensions\n", + tensor->bytes); + for (int i = 0 ; i < tensor->dims->size; ++i) { + info ("\t\t%d: %3d\n", i, tensor->dims->data[i]); + } + + TfLiteQuantization quant = tensor->quantization; + if (kTfLiteAffineQuantization == quant.type) { + auto* quantParams = (TfLiteAffineQuantization*)quant.params; + info("Quant dimension: %" PRIi32 "\n", quantParams->quantized_dimension); + for (int i = 0; i < quantParams->scale->size; ++i) { + info("Scale[%d] = %f\n", i, quantParams->scale->data[i]); + } + for (int i = 0; i < quantParams->zero_point->size; ++i) { + info("ZeroPoint[%d] = %d\n", i, quantParams->zero_point->data[i]); + } + } +} + +void arm::app::Model::LogInterpreterInfo() +{ + if (!this->m_pInterpreter) { + printf_err("Invalid interpreter\n"); + return; + } + + info("Model INPUT tensors: \n"); + for (auto input : this->m_input) { + this->LogTensorInfo(input); + } + + info("Model OUTPUT tensors: \n"); + for (auto output : this->m_output) { + this->LogTensorInfo(output); + } + + info("Activation buffer (a.k.a tensor arena) size used: %zu\n", + this->m_pInterpreter->arena_used_bytes()); + + /* We expect there to be only one subgraph. */ + const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0); + info("Number of operators: %" PRIu32 "\n", nOperators); + + const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0); + + auto* opcodes = this->m_pModel->operator_codes(); + + /* For each operator, display registration information. 
*/ + for (size_t i = 0 ; i < nOperators; ++i) { + const tflite::Operator* op = subgraph->operators()->Get(i); + const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index()); + const TfLiteRegistration* reg = nullptr; + + tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(), + this->m_pErrorReporter, ®); + std::string opName; + + if (reg) { + if (tflite::BuiltinOperator_CUSTOM == reg->builtin_code) { + opName = std::string(reg->custom_name); + } else { + opName = std::string(EnumNameBuiltinOperator( + tflite::BuiltinOperator(reg->builtin_code))); + } + } + info("\tOperator %zu: %s\n", i, opName.c_str()); + } +} + +bool arm::app::Model::IsInited() const +{ + return this->m_inited; +} + +bool arm::app::Model::IsDataSigned() const +{ + return this->GetType() == kTfLiteInt8; +} + +bool arm::app::Model::ContainsEthosUOperator() const +{ + /* We expect there to be only one subgraph. */ + const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0); + const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0); + const auto* opcodes = this->m_pModel->operator_codes(); + + /* check for custom operators */ + for (size_t i = 0; (i < nOperators); ++i) + { + const tflite::Operator* op = subgraph->operators()->Get(i); + const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index()); + + auto builtin_code = tflite::GetBuiltinCode(opcode); + if ((builtin_code == tflite::BuiltinOperator_CUSTOM) && + ( nullptr != opcode->custom_code()) && + ( "ethos-u" == std::string(opcode->custom_code()->c_str()))) + { + return true; + } + } + return false; +} + +bool arm::app::Model::RunInference() +{ + bool inference_state = false; + if (this->m_pModel && this->m_pInterpreter) { + if (kTfLiteOk != this->m_pInterpreter->Invoke()) { + printf_err("Invoke failed.\n"); + } else { + inference_state = true; + } + } else { + printf_err("Error: No interpreter!\n"); + } + return inference_state; +} + +TfLiteTensor* arm::app::Model::GetInputTensor(size_t index) const +{ + if (index < this->GetNumInputs()) { + return this->m_input.at(index); + } + return nullptr; +} + +TfLiteTensor* arm::app::Model::GetOutputTensor(size_t index) const +{ + if (index < this->GetNumOutputs()) { + return this->m_output.at(index); + } + return nullptr; +} + +size_t arm::app::Model::GetNumInputs() const +{ + if (this->m_pModel && this->m_pInterpreter) { + return this->m_pInterpreter->inputs_size(); + } + return 0; +} + +size_t arm::app::Model::GetNumOutputs() const +{ + if (this->m_pModel && this->m_pInterpreter) { + return this->m_pInterpreter->outputs_size(); + } + return 0; +} + + +TfLiteType arm::app::Model::GetType() const +{ + return this->m_type; +} + +TfLiteIntArray* arm::app::Model::GetInputShape(size_t index) const +{ + if (index < this->GetNumInputs()) { + return this->m_input.at(index)->dims; + } + return nullptr; +} + +TfLiteIntArray* arm::app::Model::GetOutputShape(size_t index) const +{ + if (index < this->GetNumOutputs()) { + return this->m_output.at(index)->dims; + } + return nullptr; +} + +bool arm::app::Model::ShowModelInfoHandler() +{ + if (!this->IsInited()) { + printf_err("Model is not initialised! Terminating processing.\n"); + return false; + } + + PrintTensorFlowVersion(); + info("Model address: 0x%p", this->ModelPointer()); + info("Model size: %" PRIu32 " bytes.", this->ModelSize()); + info("Model info:\n"); + this->LogInterpreterInfo(); + + info("The model is optimised for Ethos-U NPU: %s.\n", this->ContainsEthosUOperator()? 
"yes": "no"); + + return true; +} + +const uint8_t* arm::app::Model::ModelPointer() +{ + return this->m_modelAddr; +} + +uint32_t arm::app::Model::ModelSize() +{ + return this->m_modelSize; +} diff --git a/source/application/api/common/source/TensorFlowLiteMicro.cc b/source/application/api/common/source/TensorFlowLiteMicro.cc new file mode 100644 index 0000000..8738e5c --- /dev/null +++ b/source/application/api/common/source/TensorFlowLiteMicro.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 Arm Limited. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "TensorFlowLiteMicro.hpp" + +void PrintTensorFlowVersion() +{} + +arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor) +{ + arm::app::QuantParams params; + if (kTfLiteAffineQuantization == tensor->quantization.type) { + auto* quantParams = (TfLiteAffineQuantization*) (tensor->quantization.params); + if (quantParams && 0 == quantParams->quantized_dimension) { + if (quantParams->scale->size) { + params.scale = quantParams->scale->data[0]; + } + if (quantParams->zero_point->size) { + params.offset = quantParams->zero_point->data[0]; + } + } else if (tensor->params.scale != 0.0) { + /* Legacy tensorflow quantisation parameters */ + params.scale = tensor->params.scale; + params.offset = tensor->params.zero_point; + } + } + return params; +} + +extern "C" void DebugLog(const char* s) +{ + puts(s); +} -- cgit v1.2.1