author     alexander <alexander.efremov@arm.com>       2021-03-26 21:42:19 +0000
committer  Kshitij Sisodia <kshitij.sisodia@arm.com>    2021-03-29 16:29:55 +0100
commit     3c79893217bc632c9b0efa815091bef3c779490c (patch)
tree       ad06b444557eb8124652b45621d736fa1b92f65d /source/use_case/ad/src
parent     6ad6d55715928de72979b04194da1bdf04a4c51b (diff)
download   ml-embedded-evaluation-kit-3c79893217bc632c9b0efa815091bef3c779490c.tar.gz
Opensource ML embedded evaluation kit (21.03)
Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
Diffstat (limited to 'source/use_case/ad/src')
-rw-r--r--   source/use_case/ad/src/AdMelSpectrogram.cc    90
-rw-r--r--   source/use_case/ad/src/AdModel.cc              55
-rw-r--r--   source/use_case/ad/src/AdPostProcessing.cc    116
-rw-r--r--   source/use_case/ad/src/MainLoop.cc            114
-rw-r--r--   source/use_case/ad/src/MelSpectrogram.cc      311
-rw-r--r--   source/use_case/ad/src/UseCaseHandler.cc      422
6 files changed, 1108 insertions, 0 deletions
diff --git a/source/use_case/ad/src/AdMelSpectrogram.cc b/source/use_case/ad/src/AdMelSpectrogram.cc
new file mode 100644
index 0000000..183c05c
--- /dev/null
+++ b/source/use_case/ad/src/AdMelSpectrogram.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AdMelSpectrogram.hpp"
+
+#include "PlatformMath.hpp"
+
+namespace arm {
+namespace app {
+namespace audio {
+
+ bool AdMelSpectrogram::ApplyMelFilterBank(
+ std::vector<float>& fftVec,
+ std::vector<std::vector<float>>& melFilterBank,
+ std::vector<int32_t>& filterBankFilterFirst,
+ std::vector<int32_t>& filterBankFilterLast,
+ std::vector<float>& melEnergies)
+ {
+ const size_t numBanks = melEnergies.size();
+
+ if (numBanks != filterBankFilterFirst.size() ||
+ numBanks != filterBankFilterLast.size()) {
+ printf_err("unexpected filter bank lengths\n");
+ return false;
+ }
+
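+        /* Each mel energy is a weighted sum of the spectrum bins covered by that
+         * filter; only the filter's non-zero span [firstIndex, lastIndex] is stored. */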
+ for (size_t bin = 0; bin < numBanks; ++bin) {
+ auto filterBankIter = melFilterBank[bin].begin();
+ float melEnergy = 1e-10; /* Avoid log of zero at later stages. */
+ const int32_t firstIndex = filterBankFilterFirst[bin];
+ const int32_t lastIndex = filterBankFilterLast[bin];
+
+ for (int32_t i = firstIndex; i <= lastIndex; ++i) {
+ melEnergy += (*filterBankIter++ * fftVec[i]);
+ }
+
+ melEnergies[bin] = melEnergy;
+ }
+
+ return true;
+ }
+
+ void AdMelSpectrogram::ConvertToLogarithmicScale(
+ std::vector<float>& melEnergies)
+ {
+ /* Container for natural logarithms of mel energies */
+ std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+        /* VecLogarithmF32 returns natural logs, so multiply by log10(e) to get
+         * log10 values; these are then scaled by 10 to give a decibel-style scale. */
+        constexpr float multiplier = 10.0 * /* default scalar */
+            0.4342944819032518; /* log10f(std::exp(1.0)) */
+
+ /* Take log of the whole vector */
+ math::MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+ /* Scale the log values. */
+ for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+ iterM != melEnergies.end(); ++iterM, ++iterL) {
+
+ *iterM = *iterL * multiplier;
+ }
+ }
+
+ float AdMelSpectrogram::GetMelFilterBankNormaliser(
+ const float& leftMel,
+ const float& rightMel,
+ const bool useHTKMethod)
+ {
+        /* Slaney normalisation: scale each triangular filter by 2 / (filter bandwidth in Hz)
+         * so that all filters have equal area. */
+ return (2.0f / (AdMelSpectrogram::InverseMelScale(rightMel, useHTKMethod) -
+ AdMelSpectrogram::InverseMelScale(leftMel, useHTKMethod)));
+ }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/ad/src/AdModel.cc b/source/use_case/ad/src/AdModel.cc
new file mode 100644
index 0000000..148bc98
--- /dev/null
+++ b/source/use_case/ad/src/AdModel.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AdModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::AdModel::GetOpResolver()
+{
+ return this->_m_opResolver;
+}
+
+bool arm::app::AdModel::EnlistOperations()
+{
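+    /* Register only the operators used by the anomaly detection model graph. */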
+ this->_m_opResolver.AddAveragePool2D();
+ this->_m_opResolver.AddConv2D();
+ this->_m_opResolver.AddDepthwiseConv2D();
+ this->_m_opResolver.AddRelu6();
+ this->_m_opResolver.AddReshape();
+
+#if defined(ARM_NPU)
+ if (kTfLiteOk == this->_m_opResolver.AddEthosU()) {
+ info("Added %s support to op resolver\n",
+ tflite::GetString_ETHOSU());
+ } else {
+        printf_err("Failed to add Arm NPU support to op resolver.\n");
+ return false;
+ }
+#endif /* ARM_NPU */
+ return true;
+}
+
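+/* The model data is compiled and linked separately; these hooks return its address and size. */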
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::AdModel::ModelPointer()
+{
+ return GetModelPointer();
+}
+extern size_t GetModelLen();
+size_t arm::app::AdModel::ModelSize()
+{
+ return GetModelLen();
+}
diff --git a/source/use_case/ad/src/AdPostProcessing.cc b/source/use_case/ad/src/AdPostProcessing.cc
new file mode 100644
index 0000000..157784b
--- /dev/null
+++ b/source/use_case/ad/src/AdPostProcessing.cc
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AdPostProcessing.hpp"
+
+#include "hal.h"
+
+#include <numeric>
+#include <cmath>
+#include <string>
+
+namespace arm {
+namespace app {
+
+ template<typename T>
+ std::vector<float> Dequantize(TfLiteTensor* tensor) {
+
+ if (tensor == nullptr) {
+            printf_err("Tensor is a null pointer, cannot dequantize.\n");
+ return std::vector<float>();
+ }
+ T* tensorData = tflite::GetTensorData<T>(tensor);
+
+ uint32_t totalOutputSize = 1;
+ for (int inputDim = 0; inputDim < tensor->dims->size; inputDim++){
+ totalOutputSize *= tensor->dims->data[inputDim];
+ }
+
+ /* For getting the floating point values, we need quantization parameters */
+ QuantParams quantParams = GetTensorQuantParams(tensor);
+
+ std::vector<float> dequantizedOutput(totalOutputSize);
+
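+        /* Affine dequantisation: real value = scale * (quantised value - zero point). */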
+ for (size_t i = 0; i < totalOutputSize; ++i) {
+ dequantizedOutput[i] = quantParams.scale * (tensorData[i] - quantParams.offset);
+ }
+
+ return dequantizedOutput;
+ }
+
+ void Softmax(std::vector<float>& inputVector) {
+ auto start = inputVector.begin();
+ auto end = inputVector.end();
+
+        /* Subtract the maximum for numerical stability, then exponentiate. */
+ float maxValue = *std::max_element(start, end);
+ for (auto it = start; it!=end; ++it) {
+ *it = std::exp((*it) - maxValue);
+ }
+
+ float sumExp = std::accumulate(start, end, 0.0f);
+
+ for (auto it = start; it!=end; ++it) {
+ *it = (*it)/sumExp;
+ }
+ }
+
+ int8_t OutputIndexFromFileName(std::string wavFileName) {
+ /* Filename is assumed in the form machine_id_00.wav */
+        std::string delimiter = "_"; /* First delimiter used to split the file name. */
+ size_t delimiterStart;
+ std::string subString;
+        size_t machineIdxInString = 3; /* The machine id is expected in the third '_'-separated part of the file name. */
+
+ for (size_t i = 0; i < machineIdxInString; ++i) {
+ delimiterStart = wavFileName.find(delimiter);
+ subString = wavFileName.substr(0, delimiterStart);
+ wavFileName.erase(0, delimiterStart + delimiter.length());
+ }
+
+ /* At this point substring should be 00.wav */
+        delimiter = "."; /* Second delimiter used to split the file name. */
+ delimiterStart = subString.find(delimiter);
+ subString = (delimiterStart != std::string::npos) ? subString.substr(0, delimiterStart) : subString;
+
+ auto is_number = [](const std::string& str) -> bool
+ {
+ std::string::const_iterator it = str.begin();
+ while (it != str.end() && std::isdigit(*it)) ++it;
+ return !str.empty() && it == str.end();
+ };
+
+ const int8_t machineIdx = is_number(subString) ? std::stoi(subString) : -1;
+
+        /* Return the corresponding index in the output vector; the model's outputs map to machine IDs 00, 02, 04 and 06. */
+ if (machineIdx == 0) {
+ return 0;
+ } else if (machineIdx == 2) {
+ return 1;
+ } else if (machineIdx == 4) {
+ return 2;
+ } else if (machineIdx == 6) {
+ return 3;
+ } else {
+            printf_err("%d is an invalid machine index\n", machineIdx);
+ return -1;
+ }
+ }
+
+ template std::vector<float> Dequantize<uint8_t>(TfLiteTensor* tensor);
+ template std::vector<float> Dequantize<int8_t>(TfLiteTensor* tensor);
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/use_case/ad/src/MainLoop.cc b/source/use_case/ad/src/MainLoop.cc
new file mode 100644
index 0000000..5455b43
--- /dev/null
+++ b/source/use_case/ad/src/MainLoop.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h" /* Brings in platform definitions */
+#include "InputFiles.hpp" /* For input data */
+#include "AdModel.hpp" /* Model class for running inference */
+#include "UseCaseCommonUtils.hpp" /* Utils functions */
+#include "UseCaseHandler.hpp" /* Handlers for different user options */
+
+enum opcodes
+{
+ MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector */
+ MENU_OPT_RUN_INF_CHOSEN, /* Run on a user provided vector index */
+ MENU_OPT_RUN_INF_ALL, /* Run inference on all */
+ MENU_OPT_SHOW_MODEL_INFO, /* Show model info */
+ MENU_OPT_LIST_AUDIO_CLIPS /* List the current baked audio signals */
+};
+
+static void DisplayMenu()
+{
+ printf("\n\nUser input required\n");
+ printf("Enter option number from:\n\n");
+ printf(" %u. Classify next audio signal\n", MENU_OPT_RUN_INF_NEXT);
+ printf(" %u. Classify audio signal at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+ printf(" %u. Run classification on all audio signals\n", MENU_OPT_RUN_INF_ALL);
+ printf(" %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+ printf(" %u. List audio signals\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
+ printf(" Choice: ");
+}
+
+
+void main_loop(hal_platform& platform)
+{
+ arm::app::AdModel model; /* Model wrapper object. */
+
+ /* Load the model. */
+ if (!model.Init())
+ {
+ printf_err("failed to initialise model\n");
+ return;
+ }
+
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+ caseContext.Set<uint32_t>("clipIndex", 0);
+ caseContext.Set<int>("frameLength", g_FrameLength);
+ caseContext.Set<int>("frameStride", g_FrameStride);
+ caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);
+ caseContext.Set<float>("trainingMean", g_TrainingMean);
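+    /* Frame length, frame stride, score threshold and training mean are
+     * use-case configuration constants (the g_* values above). */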
+
+ /* Main program loop. */
+ bool executionSuccessful = true;
+    constexpr bool bUseMenu = (NUMBER_OF_FILES > 1);
+
+ /* Loop. */
+ do {
+ int menuOption = MENU_OPT_RUN_INF_NEXT;
+ if (bUseMenu) {
+ DisplayMenu();
+ menuOption = arm::app::ReadUserInputAsInt(platform);
+ printf("\n");
+ }
+ switch (menuOption) {
+ case MENU_OPT_RUN_INF_NEXT:
+ executionSuccessful = ClassifyVibrationHandler(
+ caseContext,
+ caseContext.Get<uint32_t>("clipIndex"),
+ false);
+ break;
+ case MENU_OPT_RUN_INF_CHOSEN: {
+ printf(" Enter the data index [0, %d]: ",
+ NUMBER_OF_FILES-1);
+ auto audioIndex = static_cast<uint32_t>(
+ arm::app::ReadUserInputAsInt(platform));
+ executionSuccessful = ClassifyVibrationHandler(caseContext,
+ audioIndex,
+ false);
+ break;
+ }
+ case MENU_OPT_RUN_INF_ALL:
+ executionSuccessful = ClassifyVibrationHandler(
+ caseContext,
+ caseContext.Get<uint32_t>("clipIndex"),
+ true);
+ break;
+ case MENU_OPT_SHOW_MODEL_INFO:
+ executionSuccessful = model.ShowModelInfoHandler();
+ break;
+ case MENU_OPT_LIST_AUDIO_CLIPS:
+ executionSuccessful = ListFilesHandler(caseContext);
+ break;
+ default:
+                printf("Incorrect choice, try again.\n");
+ break;
+ }
+ } while (executionSuccessful && bUseMenu);
+ info("Main loop terminated.\n");
+}
diff --git a/source/use_case/ad/src/MelSpectrogram.cc b/source/use_case/ad/src/MelSpectrogram.cc
new file mode 100644
index 0000000..86d57e6
--- /dev/null
+++ b/source/use_case/ad/src/MelSpectrogram.cc
@@ -0,0 +1,311 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "MelSpectrogram.hpp"
+
+#include "PlatformMath.hpp"
+
+#include <cfloat>
+
+namespace arm {
+namespace app {
+namespace audio {
+
+ MelSpecParams::MelSpecParams(
+ const float samplingFreq,
+ const uint32_t numFbankBins,
+ const float melLoFreq,
+ const float melHiFreq,
+ const uint32_t frameLen,
+ const bool useHtkMethod):
+ m_samplingFreq(samplingFreq),
+ m_numFbankBins(numFbankBins),
+ m_melLoFreq(melLoFreq),
+ m_melHiFreq(melHiFreq),
+ m_frameLen(frameLen),
+
+ /* Smallest power of 2 >= frame length. */
+ m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
+ m_useHtkMethod(useHtkMethod)
+ {}
+
+ std::string MelSpecParams::Str()
+ {
+ char strC[1024];
+ snprintf(strC, sizeof(strC) - 1, "\n \
+ \n\t Sampling frequency: %f\
+ \n\t Number of filter banks: %u\
+ \n\t Mel frequency limit (low): %f\
+ \n\t Mel frequency limit (high): %f\
+ \n\t Frame length: %u\
+ \n\t Padded frame length: %u\
+ \n\t Using HTK for Mel scale: %s\n",
+ this->m_samplingFreq, this->m_numFbankBins, this->m_melLoFreq,
+ this->m_melHiFreq, this->m_frameLen,
+ this->m_frameLenPadded, this->m_useHtkMethod ? "yes" : "no");
+ return std::string{strC};
+ }
+
+ MelSpectrogram::MelSpectrogram(const MelSpecParams& params):
+ _m_params(params),
+ _m_filterBankInitialised(false)
+ {
+ this->_m_buffer = std::vector<float>(
+ this->_m_params.m_frameLenPadded, 0.0);
+ this->_m_frame = std::vector<float>(
+ this->_m_params.m_frameLenPadded, 0.0);
+ this->_m_melEnergies = std::vector<float>(
+ this->_m_params.m_numFbankBins, 0.0);
+
+ this->_m_windowFunc = std::vector<float>(this->_m_params.m_frameLen);
+ const float multiplier = 2 * M_PI / this->_m_params.m_frameLen;
+
+        /* Create Hann window function: w[i] = 0.5 - 0.5 * cos(2 * pi * i / frameLen). */
+ for (size_t i = 0; i < this->_m_params.m_frameLen; ++i) {
+ this->_m_windowFunc[i] = (0.5 - (0.5 *
+ math::MathUtils::CosineF32(static_cast<float>(i) * multiplier)));
+ }
+
+ math::MathUtils::FftInitF32(this->_m_params.m_frameLenPadded, this->_m_fftInstance);
+ debug("Instantiated Mel Spectrogram object: %s\n", this->_m_params.Str().c_str());
+ }
+
+ void MelSpectrogram::Init()
+ {
+ this->_InitMelFilterBank();
+ }
+
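+    /* Convert a frequency in Hz to the mel scale.
+     * HTK formula: mel = 1127 * ln(1 + freq / 700).
+     * Slaney formula: linear below the log-frequency break point, logarithmic above it. */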
+ float MelSpectrogram::MelScale(const float freq, const bool useHTKMethod)
+ {
+ if (useHTKMethod) {
+ return 1127.0f * logf (1.0f + freq / 700.0f);
+ } else {
+ /* Slaney formula for mel scale. */
+ float mel = freq / ms_freqStep;
+
+ if (freq >= ms_minLogHz) {
+ mel = ms_minLogMel + logf(freq / ms_minLogHz) / ms_logStep;
+ }
+ return mel;
+ }
+ }
+
+ float MelSpectrogram::InverseMelScale(const float melFreq, const bool useHTKMethod)
+ {
+ if (useHTKMethod) {
+ return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
+ } else {
+ /* Slaney formula for inverse mel scale. */
+ float freq = ms_freqStep * melFreq;
+
+ if (melFreq >= ms_minLogMel) {
+ freq = ms_minLogHz * expf(ms_logStep * (melFreq - ms_minLogMel));
+ }
+ return freq;
+ }
+ }
+
+ bool MelSpectrogram::ApplyMelFilterBank(
+ std::vector<float>& fftVec,
+ std::vector<std::vector<float>>& melFilterBank,
+ std::vector<int32_t>& filterBankFilterFirst,
+ std::vector<int32_t>& filterBankFilterLast,
+ std::vector<float>& melEnergies)
+ {
+ const size_t numBanks = melEnergies.size();
+
+ if (numBanks != filterBankFilterFirst.size() ||
+ numBanks != filterBankFilterLast.size()) {
+ printf_err("unexpected filter bank lengths\n");
+ return false;
+ }
+
+ for (size_t bin = 0; bin < numBanks; ++bin) {
+ auto filterBankIter = melFilterBank[bin].begin();
+ float melEnergy = FLT_MIN; /* Avoid log of zero at later stages */
+ int32_t firstIndex = filterBankFilterFirst[bin];
+ int32_t lastIndex = filterBankFilterLast[bin];
+
+ for (int i = firstIndex; i <= lastIndex; ++i) {
+ float energyRep = math::MathUtils::SqrtF32(fftVec[i]);
+ melEnergy += (*filterBankIter++ * energyRep);
+ }
+
+ melEnergies[bin] = melEnergy;
+ }
+
+ return true;
+ }
+
+ void MelSpectrogram::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+ {
+ for (size_t bin = 0; bin < melEnergies.size(); ++bin) {
+ melEnergies[bin] = logf(melEnergies[bin]);
+ }
+ }
+
+ void MelSpectrogram::_ConvertToPowerSpectrum()
+ {
+ const uint32_t halfDim = this->_m_params.m_frameLenPadded / 2;
+
+        /* The packed real-FFT output stores the DC term in buffer[0] and the Nyquist term in
+         * buffer[1]; compute their energies separately and restore them after the magnitude pass. */
+ float firstEnergy = this->_m_buffer[0] * this->_m_buffer[0];
+ float lastEnergy = this->_m_buffer[1] * this->_m_buffer[1];
+
+ math::MathUtils::ComplexMagnitudeSquaredF32(
+ this->_m_buffer.data(),
+ this->_m_buffer.size(),
+ this->_m_buffer.data(),
+ this->_m_buffer.size()/2);
+
+ this->_m_buffer[0] = firstEnergy;
+ this->_m_buffer[halfDim] = lastEnergy;
+ }
+
+ float MelSpectrogram::GetMelFilterBankNormaliser(
+ const float& leftMel,
+ const float& rightMel,
+ const bool useHTKMethod)
+ {
+ UNUSED(leftMel);
+ UNUSED(rightMel);
+ UNUSED(useHTKMethod);
+
+ /* By default, no normalisation => return 1 */
+ return 1.f;
+ }
+
+ void MelSpectrogram::_InitMelFilterBank()
+ {
+ if (!this->_IsMelFilterBankInited()) {
+ this->_m_melFilterBank = this->_CreateMelFilterBank();
+ this->_m_filterBankInitialised = true;
+ }
+ }
+
+ bool MelSpectrogram::_IsMelFilterBankInited()
+ {
+ return this->_m_filterBankInitialised;
+ }
+
+ std::vector<float> MelSpectrogram::ComputeMelSpec(const std::vector<int16_t>& audioData, float trainingMean)
+ {
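+        /* Pipeline: normalise int16 samples -> apply window -> zero-pad -> FFT ->
+         * power spectrum -> mel filter banks -> log -> subtract training mean. */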
+ this->_InitMelFilterBank();
+
+ /* TensorFlow way of normalizing .wav data to (-1, 1). */
+ constexpr float normaliser = 1.0/(1<<15);
+ for (size_t i = 0; i < this->_m_params.m_frameLen; ++i) {
+ this->_m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
+ }
+
+ /* Apply window function to input frame. */
+ for(size_t i = 0; i < this->_m_params.m_frameLen; ++i) {
+ this->_m_frame[i] *= this->_m_windowFunc[i];
+ }
+
+ /* Set remaining frame values to 0. */
+ std::fill(this->_m_frame.begin() + this->_m_params.m_frameLen,this->_m_frame.end(), 0);
+
+ /* Compute FFT. */
+ math::MathUtils::FftF32(this->_m_frame, this->_m_buffer, this->_m_fftInstance);
+
+ /* Convert to power spectrum. */
+ this->_ConvertToPowerSpectrum();
+
+ /* Apply mel filterbanks. */
+ if (!this->ApplyMelFilterBank(this->_m_buffer,
+ this->_m_melFilterBank,
+ this->_m_filterBankFilterFirst,
+ this->_m_filterBankFilterLast,
+ this->_m_melEnergies)) {
+ printf_err("Failed to apply MEL filter banks\n");
+ }
+
+ /* Convert to logarithmic scale */
+ this->ConvertToLogarithmicScale(this->_m_melEnergies);
+
+ /* Perform mean subtraction. */
+ for (auto& energy:this->_m_melEnergies) {
+ energy -= trainingMean;
+ }
+
+ return this->_m_melEnergies;
+ }
+
+ std::vector<std::vector<float>> MelSpectrogram::_CreateMelFilterBank()
+ {
+ size_t numFftBins = this->_m_params.m_frameLenPadded / 2;
+ float fftBinWidth = static_cast<float>(this->_m_params.m_samplingFreq) / this->_m_params.m_frameLenPadded;
+
+ float melLowFreq = MelSpectrogram::MelScale(this->_m_params.m_melLoFreq,
+ this->_m_params.m_useHtkMethod);
+ float melHighFreq = MelSpectrogram::MelScale(this->_m_params.m_melHiFreq,
+ this->_m_params.m_useHtkMethod);
+ float melFreqDelta = (melHighFreq - melLowFreq) / (this->_m_params.m_numFbankBins + 1);
+
+ std::vector<float> thisBin = std::vector<float>(numFftBins);
+ std::vector<std::vector<float>> melFilterBank(
+ this->_m_params.m_numFbankBins);
+ this->_m_filterBankFilterFirst =
+ std::vector<int32_t>(this->_m_params.m_numFbankBins);
+ this->_m_filterBankFilterLast =
+ std::vector<int32_t>(this->_m_params.m_numFbankBins);
+
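+        /* Build one triangular filter per mel bank: the weight rises linearly from
+         * leftMel to centerMel and falls back to zero at rightMel; only the non-zero
+         * FFT bin range [firstIndex, lastIndex] is kept. */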
+ for (size_t bin = 0; bin < this->_m_params.m_numFbankBins; bin++) {
+ float leftMel = melLowFreq + bin * melFreqDelta;
+ float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
+ float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
+
+ int32_t firstIndex = -1;
+ int32_t lastIndex = -1;
+ const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->_m_params.m_useHtkMethod);
+
+ for (size_t i = 0; i < numFftBins; ++i) {
+ float freq = (fftBinWidth * i); /* Center freq of this fft bin. */
+ float mel = MelSpectrogram::MelScale(freq, this->_m_params.m_useHtkMethod);
+ thisBin[i] = 0.0;
+
+ if (mel > leftMel && mel < rightMel) {
+ float weight;
+ if (mel <= centerMel) {
+ weight = (mel - leftMel) / (centerMel - leftMel);
+ } else {
+ weight = (rightMel - mel) / (rightMel - centerMel);
+ }
+
+ thisBin[i] = weight * normaliser;
+ if (firstIndex == -1) {
+ firstIndex = i;
+ }
+ lastIndex = i;
+ }
+ }
+
+ this->_m_filterBankFilterFirst[bin] = firstIndex;
+ this->_m_filterBankFilterLast[bin] = lastIndex;
+
+ /* Copy the part we care about. */
+ for (int32_t i = firstIndex; i <= lastIndex; ++i) {
+ melFilterBank[bin].push_back(thisBin[i]);
+ }
+ }
+
+ return melFilterBank;
+ }
+
+} /* namespace audio */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/ad/src/UseCaseHandler.cc b/source/use_case/ad/src/UseCaseHandler.cc
new file mode 100644
index 0000000..c18a0a4
--- /dev/null
+++ b/source/use_case/ad/src/UseCaseHandler.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+
+#include "AdModel.hpp"
+#include "InputFiles.hpp"
+#include "Classifier.hpp"
+#include "hal.h"
+#include "AdMelSpectrogram.hpp"
+#include "AudioUtils.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "AdPostProcessing.hpp"
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief Helper function to increment current audio clip index
+     * @param[in/out] ctx reference to the application context object
+ **/
+ static void _IncrementAppCtxClipIdx(ApplicationContext& ctx);
+
+ /**
+ * @brief Helper function to set the audio clip index
+     * @param[in/out] ctx reference to the application context object
+ * @param[in] idx value to be set
+ * @return true if index is set, false otherwise
+ **/
+ static bool _SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
+
+ /**
+ * @brief Presents inference results using the data presentation
+ * object.
+ * @param[in] platform reference to the hal platform object
+     * @param[in] result average anomaly score over the audio clip
+     * @param[in] threshold an anomaly is reported if the result exceeds this value
+ * @return true if successful, false otherwise
+ **/
+ static bool _PresentInferenceResult(hal_platform& platform, float result, float threshold);
+
+ /**
+     * @brief Returns a function to perform feature calculation and populate the input tensor
+     * with mel spectrogram data.
+     *
+     * The input tensor's data type is checked to choose the correct feature data type.
+     * If the tensor has an integer data type then the original features are quantised.
+     *
+     * Warning: the mel spectrogram calculator provided as input must outlive the returned function.
+     *
+     * @param[in] melSpec Mel spectrogram feature calculator.
+     * @param[in/out] inputTensor Input tensor pointer to store calculated features.
+     * @param[in] cacheSize Size of the feature vectors cache (number of feature vectors).
+     * @param[in] trainingMean Training mean to subtract from the computed features.
+     * @return Function to be called providing audio sample and sliding window index.
+ */
+ static std::function<void (std::vector<int16_t>&, int, bool, size_t, size_t)>
+ GetFeatureCalculator(audio::AdMelSpectrogram& melSpec,
+ TfLiteTensor* inputTensor,
+ size_t cacheSize,
+ float trainingMean);
+
+ /* Vibration classification handler */
+ bool ClassifyVibrationHandler(ApplicationContext& ctx, uint32_t clipIndex, bool runAll)
+ {
+ auto& platform = ctx.Get<hal_platform&>("platform");
+
+ constexpr uint32_t dataPsnTxtInfStartX = 20;
+ constexpr uint32_t dataPsnTxtInfStartY = 40;
+
+ platform.data_psn->clear(COLOR_BLACK);
+
+ auto& model = ctx.Get<Model&>("model");
+
+ /* If the request has a valid size, set the audio index */
+ if (clipIndex < NUMBER_OF_FILES) {
+ if (!_SetAppCtxClipIdx(ctx, clipIndex)) {
+ return false;
+ }
+ }
+ if (!model.IsInited()) {
+ printf_err("Model is not initialised! Terminating processing.\n");
+ return false;
+ }
+
+ const auto frameLength = ctx.Get<int>("frameLength");
+ const auto frameStride = ctx.Get<int>("frameStride");
+ const auto scoreThreshold = ctx.Get<float>("scoreThreshold");
+ const float trainingMean = ctx.Get<float>("trainingMean");
+ auto startClipIdx = ctx.Get<uint32_t>("clipIndex");
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+
+ if (!inputTensor->dims) {
+ printf_err("Invalid input tensor dims\n");
+ return false;
+ }
+
+ TfLiteIntArray* inputShape = model.GetInputShape(0);
+ const uint32_t kNumRows = inputShape->data[1];
+ const uint32_t kNumCols = inputShape->data[2];
+
+ audio::AdMelSpectrogram melSpec = audio::AdMelSpectrogram(frameLength);
+ melSpec.Init();
+
+ /* Deduce the data length required for 1 inference from the network parameters. */
+ const uint8_t inputResizeScale = 2;
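+        /* A sliding window yielding N frames of frameLength samples with step frameStride
+         * spans (N - 1) * frameStride + frameLength samples; here N = inputResizeScale * kNumCols. */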
+ const uint32_t audioDataWindowSize = (((inputResizeScale * kNumCols) - 1) * frameStride) + frameLength;
+
+ /* We are choosing to move by 20 frames across the audio for each inference. */
+ const uint8_t nMelSpecVectorsInAudioStride = 20;
+
+ auto audioDataStride = nMelSpecVectorsInAudioStride * frameStride;
+
+ do {
+ auto currentIndex = ctx.Get<uint32_t>("clipIndex");
+
+ /* Get the output index to look at based on id in the filename. */
+ int8_t machineOutputIndex = OutputIndexFromFileName(get_filename(currentIndex));
+ if (machineOutputIndex == -1) {
+ return false;
+ }
+
+ /* Creating a Mel Spectrogram sliding window for the data required for 1 inference.
+ * "resizing" done here by multiplying stride by resize scale. */
+ auto audioMelSpecWindowSlider = audio::SlidingWindow<const int16_t>(
+ get_audio_array(currentIndex),
+ audioDataWindowSize, frameLength,
+ frameStride * inputResizeScale);
+
+ /* Creating a sliding window through the whole audio clip. */
+ auto audioDataSlider = audio::SlidingWindow<const int16_t>(
+ get_audio_array(currentIndex),
+ get_audio_array_size(currentIndex),
+ audioDataWindowSize, audioDataStride);
+
+ /* Calculate number of the feature vectors in the window overlap region taking into account resizing.
+ * These feature vectors will be reused.*/
+ auto numberOfReusedFeatureVectors = kNumRows - (nMelSpecVectorsInAudioStride / inputResizeScale);
+
+ /* Construct feature calculation function. */
+ auto melSpecFeatureCalc = GetFeatureCalculator(melSpec, inputTensor,
+ numberOfReusedFeatureVectors, trainingMean);
+ if (!melSpecFeatureCalc){
+ return false;
+ }
+
+            /* Anomaly score accumulated over all inferences; averaged at the end. */
+ float result = 0;
+
+ /* Display message on the LCD - inference running. */
+ std::string str_inf{"Running inference... "};
+ platform.data_psn->present_data_text(
+ str_inf.c_str(), str_inf.size(),
+ dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+ info("Running inference on audio clip %u => %s\n", currentIndex, get_filename(currentIndex));
+
+ /* Start sliding through audio clip. */
+ while (audioDataSlider.HasNext()) {
+ const int16_t *inferenceWindow = audioDataSlider.Next();
+
+ /* We moved to the next window - set the features sliding to the new address. */
+ audioMelSpecWindowSlider.Reset(inferenceWindow);
+
+ /* The first window does not have cache ready. */
+ bool useCache = audioDataSlider.Index() > 0 && numberOfReusedFeatureVectors > 0;
+
+ /* Start calculating features inside one audio sliding window. */
+ while (audioMelSpecWindowSlider.HasNext()) {
+ const int16_t *melSpecWindow = audioMelSpecWindowSlider.Next();
+ std::vector<int16_t> melSpecAudioData = std::vector<int16_t>(melSpecWindow,
+ melSpecWindow + frameLength);
+
+ /* Compute features for this window and write them to input tensor. */
+ melSpecFeatureCalc(melSpecAudioData, audioMelSpecWindowSlider.Index(),
+ useCache, nMelSpecVectorsInAudioStride, inputResizeScale);
+ }
+
+ info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
+ audioDataSlider.TotalStrides() + 1);
+
+ /* Run inference over this audio clip sliding window */
+ arm::app::RunInference(platform, model);
+
+ /* Use the negative softmax score of the corresponding index as the outlier score */
+ std::vector<float> dequantOutput = Dequantize<int8_t>(outputTensor);
+ Softmax(dequantOutput);
+ result += -dequantOutput[machineOutputIndex];
+
+#if VERIFY_TEST_OUTPUT
+ arm::app::DumpTensor(outputTensor);
+#endif /* VERIFY_TEST_OUTPUT */
+ } /* while (audioDataSlider.HasNext()) */
+
+ /* Use average over whole clip as final score. */
+ result /= (audioDataSlider.TotalStrides() + 1);
+
+ /* Erase. */
+ str_inf = std::string(str_inf.size(), ' ');
+ platform.data_psn->present_data_text(
+ str_inf.c_str(), str_inf.size(),
+ dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+ ctx.Set<float>("result", result);
+ if (!_PresentInferenceResult(platform, result, scoreThreshold)) {
+ return false;
+ }
+
+ _IncrementAppCtxClipIdx(ctx);
+
+ } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
+
+ return true;
+ }
+
+ static void _IncrementAppCtxClipIdx(ApplicationContext& ctx)
+ {
+ auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
+
+ if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
+ ctx.Set<uint32_t>("clipIndex", 0);
+ return;
+ }
+ ++curAudioIdx;
+ ctx.Set<uint32_t>("clipIndex", curAudioIdx);
+ }
+
+ static bool _SetAppCtxClipIdx(ApplicationContext& ctx, const uint32_t idx)
+ {
+ if (idx >= NUMBER_OF_FILES) {
+ printf_err("Invalid idx %u (expected less than %u)\n",
+ idx, NUMBER_OF_FILES);
+ return false;
+ }
+ ctx.Set<uint32_t>("clipIndex", idx);
+ return true;
+ }
+
+ static bool _PresentInferenceResult(hal_platform& platform, float result, float threshold)
+ {
+ constexpr uint32_t dataPsnTxtStartX1 = 20;
+ constexpr uint32_t dataPsnTxtStartY1 = 30;
+ constexpr uint32_t dataPsnTxtYIncr = 16; /* Row index increment */
+
+ platform.data_psn->set_text_color(COLOR_GREEN);
+
+ /* Display each result */
+ uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+
+ std::string resultStr = std::string{"Average anomaly score is: "} + std::to_string(result) +
+ std::string("\n") + std::string("Anomaly threshold is: ") + std::to_string(threshold) +
+ std::string("\n");
+
+ if (result > threshold) {
+ resultStr += std::string("Anomaly detected!");
+ } else {
+ resultStr += std::string("Everything fine, no anomaly detected!");
+ }
+
+ platform.data_psn->present_data_text(
+ resultStr.c_str(), resultStr.size(),
+ dataPsnTxtStartX1, rowIdx1, 0);
+
+ info("%s\n", resultStr.c_str());
+
+ return true;
+ }
+
+ /**
+ * @brief Generic feature calculator factory.
+ *
+ * Returns lambda function to compute features using features cache.
+ * Real features math is done by a lambda function provided as a parameter.
+ * Features are written to input tensor memory.
+ *
+ * @tparam T feature vector type.
+ * @param inputTensor model input tensor pointer.
+ * @param cacheSize number of feature vectors to cache. Defined by the sliding window overlap.
+ * @param compute features calculator function.
+ * @return lambda function to compute features.
+ */
+ template<class T>
+ std::function<void (std::vector<int16_t>&, size_t, bool, size_t, size_t)>
+ _FeatureCalc(TfLiteTensor* inputTensor, size_t cacheSize,
+ std::function<std::vector<T> (std::vector<int16_t>& )> compute)
+ {
+ /* Feature cache to be captured by lambda function*/
+ static std::vector<std::vector<T>> featureCache = std::vector<std::vector<T>>(cacheSize);
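+        /* Note: the cache is static, so it is shared by every calculator created for the same type T. */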
+
+ return [=](std::vector<int16_t>& audioDataWindow,
+ size_t index,
+ bool useCache,
+ size_t featuresOverlapIndex,
+ size_t resizeScale)
+ {
+ T *tensorData = tflite::GetTensorData<T>(inputTensor);
+ std::vector<T> features;
+
+ /* Reuse features from cache if cache is ready and sliding windows overlap.
+ * Overlap is in the beginning of sliding window with a size of a feature cache. */
+ if (useCache && index < featureCache.size()) {
+ features = std::move(featureCache[index]);
+ } else {
+ features = std::move(compute(audioDataWindow));
+ }
+ auto size = features.size() / resizeScale;
+ auto sizeBytes = sizeof(T);
+
+            /* Input is written transposed and "resized" by keeping every resizeScale-th feature
+             * element: element outIndex of this window goes to offset outIndex * size + index. */
+ for (size_t outIndex = 0; outIndex < size; outIndex++) {
+ std::memcpy(tensorData + (outIndex*size) + index, &features[outIndex*resizeScale], sizeBytes);
+ }
+
+ /* Start renewing cache as soon iteration goes out of the windows overlap. */
+ if (index >= featuresOverlapIndex / resizeScale) {
+ featureCache[index - featuresOverlapIndex / resizeScale] = std::move(features);
+ }
+ };
+ }
+
+ template std::function<void (std::vector<int16_t>&, size_t , bool, size_t, size_t)>
+ _FeatureCalc<int8_t>(TfLiteTensor* inputTensor,
+ size_t cacheSize,
+ std::function<std::vector<int8_t> (std::vector<int16_t>&)> compute);
+
+ template std::function<void (std::vector<int16_t>&, size_t , bool, size_t, size_t)>
+ _FeatureCalc<uint8_t>(TfLiteTensor* inputTensor,
+ size_t cacheSize,
+ std::function<std::vector<uint8_t> (std::vector<int16_t>&)> compute);
+
+ template std::function<void (std::vector<int16_t>&, size_t , bool, size_t, size_t)>
+ _FeatureCalc<int16_t>(TfLiteTensor* inputTensor,
+ size_t cacheSize,
+ std::function<std::vector<int16_t> (std::vector<int16_t>&)> compute);
+
+ template std::function<void(std::vector<int16_t>&, size_t, bool, size_t, size_t)>
+ _FeatureCalc<float>(TfLiteTensor *inputTensor,
+ size_t cacheSize,
+ std::function<std::vector<float>(std::vector<int16_t>&)> compute);
+
+
+ static std::function<void (std::vector<int16_t>&, int, bool, size_t, size_t)>
+ GetFeatureCalculator(audio::AdMelSpectrogram& melSpec, TfLiteTensor* inputTensor, size_t cacheSize, float trainingMean)
+ {
+ std::function<void (std::vector<int16_t>&, size_t, bool, size_t, size_t)> melSpecFeatureCalc;
+
+ TfLiteQuantization quant = inputTensor->quantization;
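+        /* Quantised input tensors need features computed with the tensor's scale and zero
+         * point; float input tensors take the raw mel spectrogram values. */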
+
+ if (kTfLiteAffineQuantization == quant.type) {
+
+ auto *quantParams = (TfLiteAffineQuantization *) quant.params;
+ const float quantScale = quantParams->scale->data[0];
+ const int quantOffset = quantParams->zero_point->data[0];
+
+ switch (inputTensor->type) {
+ case kTfLiteInt8: {
+ melSpecFeatureCalc = _FeatureCalc<int8_t>(inputTensor,
+ cacheSize,
+ [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+ return melSpec.MelSpecComputeQuant<int8_t>(audioDataWindow,
+ quantScale,
+ quantOffset,
+ trainingMean);
+ }
+ );
+ break;
+ }
+ case kTfLiteUInt8: {
+ melSpecFeatureCalc = _FeatureCalc<uint8_t>(inputTensor,
+ cacheSize,
+ [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+ return melSpec.MelSpecComputeQuant<uint8_t>(audioDataWindow,
+ quantScale,
+ quantOffset,
+ trainingMean);
+ }
+ );
+ break;
+ }
+ case kTfLiteInt16: {
+ melSpecFeatureCalc = _FeatureCalc<int16_t>(inputTensor,
+ cacheSize,
+ [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+ return melSpec.MelSpecComputeQuant<int16_t>(audioDataWindow,
+ quantScale,
+ quantOffset,
+ trainingMean);
+ }
+ );
+ break;
+ }
+ default:
+ printf_err("Tensor type %s not supported\n", TfLiteTypeGetName(inputTensor->type));
+ }
+
+
+ } else {
+        melSpecFeatureCalc = _FeatureCalc<float>(inputTensor,
+                                                 cacheSize,
+                                                 [=, &melSpec](std::vector<int16_t>& audioDataWindow) {
+                                                     return melSpec.ComputeMelSpec(audioDataWindow,
+                                                                                   trainingMean);
+                                                 });
+ }
+ return melSpecFeatureCalc;
+ }
+
+} /* namespace app */
+} /* namespace arm */