summaryrefslogtreecommitdiff
path: root/source
diff options
context:
space:
mode:
authorRichard Burton <richard.burton@arm.com>2021-11-10 16:27:14 +0000
committerRichard <richard.burton@arm.com>2021-11-10 16:34:16 +0000
commit005534664e192cf909a11435c4bc4696b1f4c51f (patch)
treef8314bd284561e1f0ff68fc393ee22d0318ae162 /source
parentdee53bc7769d6201ec27deea4405c0df6c9b0623 (diff)
downloadml-embedded-evaluation-kit-005534664e192cf909a11435c4bc4696b1f4c51f.tar.gz
MLECO-2354 MLECO-2355 MLECO-2356: Moving noise reduction to public repository
* Use RNNoise model from PMZ * Add Noise reduction use-case Signed-off-by: Richard burton <richard.burton@arm.com> Change-Id: Ia8cc7ef102e22a5ff8bfbd3833594a4905a66057
Diffstat (limited to 'source')
-rw-r--r--source/use_case/noise_reduction/include/RNNoiseModel.hpp82
-rw-r--r--source/use_case/noise_reduction/include/RNNoiseProcess.hpp337
-rw-r--r--source/use_case/noise_reduction/include/UseCaseHandler.hpp97
-rw-r--r--source/use_case/noise_reduction/src/MainLoop.cc129
-rw-r--r--source/use_case/noise_reduction/src/RNNoiseModel.cc111
-rw-r--r--source/use_case/noise_reduction/src/RNNoiseProcess.cc888
-rw-r--r--source/use_case/noise_reduction/src/UseCaseHandler.cc367
-rw-r--r--source/use_case/noise_reduction/usecase.cmake110
8 files changed, 2121 insertions, 0 deletions
diff --git a/source/use_case/noise_reduction/include/RNNoiseModel.hpp b/source/use_case/noise_reduction/include/RNNoiseModel.hpp
new file mode 100644
index 0000000..f6e4510
--- /dev/null
+++ b/source/use_case/noise_reduction/include/RNNoiseModel.hpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef RNNOISE_MODEL_HPP
+#define RNNOISE_MODEL_HPP
+
+#include "Model.hpp"
+
+extern const uint32_t g_NumInputFeatures;
+extern const uint32_t g_FrameLength;
+extern const uint32_t g_FrameStride;
+
+namespace arm {
+namespace app {
+
+ class RNNoiseModel : public Model {
+ public:
+ /**
+ * @brief Runs inference for RNNoise model.
+ *
+ * Call CopyGruStates so GRU state outputs are copied to GRU state inputs before the inference run.
+ * Run ResetGruState() method to set states to zero before starting processing logically related data.
+ * @return True if inference succeeded, False - otherwise
+ */
+ bool RunInference() override;
+
+ /**
+ * @brief Sets GRU input states to zeros.
+ * Call this method before starting processing the new sequence of logically related data.
+ */
+ void ResetGruState();
+
+ /**
+ * @brief Copy current GRU output states to input states.
+ * Call this method before starting processing the next sequence of logically related data.
+ */
+ bool CopyGruStates();
+
+ /* Which index of model outputs does the main output (gains) come from. */
+ const size_t m_indexForModelOutput = 1;
+
+ protected:
+ /** @brief Gets the reference to op resolver interface class. */
+ const tflite::MicroOpResolver& GetOpResolver() override;
+
+ /** @brief Adds operations to the op resolver instance. */
+ bool EnlistOperations() override;
+
+ const uint8_t* ModelPointer() override;
+
+ size_t ModelSize() override;
+
+ /*
+ Each inference after the first needs to copy 3 GRU states from a output index to input index (model dependent):
+ 0 -> 3, 2 -> 2, 3 -> 1
+ */
+ const std::vector<std::pair<size_t, size_t>> m_gruStateMap = {{0,3}, {2, 2}, {3, 1}};
+ private:
+ /* Maximum number of individual operations that can be enlisted. */
+ static constexpr int ms_maxOpCnt = 15;
+
+ /* A mutable op resolver instance. */
+ tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
+ };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* RNNOISE_MODEL_HPP */ \ No newline at end of file
diff --git a/source/use_case/noise_reduction/include/RNNoiseProcess.hpp b/source/use_case/noise_reduction/include/RNNoiseProcess.hpp
new file mode 100644
index 0000000..3800019
--- /dev/null
+++ b/source/use_case/noise_reduction/include/RNNoiseProcess.hpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "PlatformMath.hpp"
+#include <cstdint>
+#include <vector>
+#include <array>
+#include <tuple>
+
+namespace arm {
+namespace app {
+namespace rnn {
+
+ using vec1D32F = std::vector<float>;
+ using vec2D32F = std::vector<vec1D32F>;
+ using arrHp = std::array<float, 2>;
+ using math::FftInstance;
+ using math::FftType;
+
+ class FrameFeatures {
+ public:
+ bool m_silence{false}; /* If frame contains silence or not. */
+ vec1D32F m_featuresVec{}; /* Calculated feature vector to feed to model. */
+ vec1D32F m_fftX{}; /* Vector of floats arranged to represent complex numbers. */
+ vec1D32F m_fftP{}; /* Vector of floats arranged to represent complex numbers. */
+ vec1D32F m_Ex{}; /* Spectral band energy for audio x. */
+ vec1D32F m_Ep{}; /* Spectral band energy for pitch p. */
+ vec1D32F m_Exp{}; /* Correlated spectral energy between x and p. */
+ };
+
+ /**
+ * @brief RNNoise pre and post processing class based on the 2018 paper from
+ * Jan-Marc Valin. Recommended reading:
+ * - https://jmvalin.ca/demo/rnnoise/
+ * - https://arxiv.org/abs/1709.08243
+ **/
+ class RNNoiseProcess {
+ /* Public interface */
+ public:
+ RNNoiseProcess();
+ ~RNNoiseProcess() = default;
+
+ /**
+ * @brief Calculates the features from a given audio buffer ready to be sent to RNNoise model.
+ * @param[in] audioData Pointer to the floating point vector
+ * with audio data (within the numerical
+ * limits of int16_t type).
+ * @param[in] audioLen Number of elements in the audio window.
+ * @param[out] features FrameFeatures object reference.
+ **/
+ void PreprocessFrame(const float* audioData,
+ size_t audioLen,
+ FrameFeatures& features);
+
+ /**
+ * @brief Use the RNNoise model output gain values with pre-processing features
+ * to generate audio with noise suppressed.
+ * @param[in] modelOutput Output gain values from model.
+ * @param[in] features Calculated features from pre-processing step.
+ * @param[out] outFrame Output frame to be populated.
+ **/
+ void PostProcessFrame(vec1D32F& modelOutput, FrameFeatures& features, vec1D32F& outFrame);
+
+
+ /* Public constants */
+ public:
+ static constexpr uint32_t FRAME_SIZE_SHIFT{2};
+ static constexpr uint32_t FRAME_SIZE{480};
+ static constexpr uint32_t WINDOW_SIZE{2 * FRAME_SIZE};
+ static constexpr uint32_t FREQ_SIZE{FRAME_SIZE + 1};
+
+ static constexpr uint32_t PITCH_MIN_PERIOD{60};
+ static constexpr uint32_t PITCH_MAX_PERIOD{768};
+ static constexpr uint32_t PITCH_FRAME_SIZE{960};
+ static constexpr uint32_t PITCH_BUF_SIZE{PITCH_MAX_PERIOD + PITCH_FRAME_SIZE};
+
+ static constexpr uint32_t NB_BANDS{22};
+ static constexpr uint32_t CEPS_MEM{8};
+ static constexpr uint32_t NB_DELTA_CEPS{6};
+
+ static constexpr uint32_t NB_FEATURES{NB_BANDS + 3*NB_DELTA_CEPS + 2};
+
+ /* Private functions */
+ private:
+
+ /**
+ * @brief Initialises the half window and DCT tables.
+ */
+ void InitTables();
+
+ /**
+ * @brief Applies a bi-quadratic filter over the audio window.
+ * @param[in] bHp Constant coefficient set b (arrHp type).
+ * @param[in] aHp Constant coefficient set a (arrHp type).
+ * @param[in,out] memHpX Coefficients populated by this function.
+ * @param[in,out] audioWindow Floating point vector with audio data.
+ **/
+ void BiQuad(
+ const arrHp& bHp,
+ const arrHp& aHp,
+ arrHp& memHpX,
+ vec1D32F& audioWindow);
+
+ /**
+ * @brief Computes features from the "filtered" audio window.
+ * @param[in] audioWindow Floating point vector with audio data.
+ * @param[out] features FrameFeatures object reference.
+ **/
+ void ComputeFrameFeatures(vec1D32F& audioWindow, FrameFeatures& features);
+
+ /**
+ * @brief Runs analysis on the audio buffer.
+ * @param[in] audioWindow Floating point vector with audio data.
+ * @param[out] fft Floating point FFT vector containing real and
+ * imaginary pairs of elements. NOTE: this vector
+ * does not contain the mirror image (conjugates)
+ * part of the spectrum.
+ * @param[out] energy Computed energy for each band in the Bark scale.
+ * @param[out] analysisMem Buffer sequentially, but partially,
+ * populated with new audio data.
+ **/
+ void FrameAnalysis(
+ const vec1D32F& audioWindow,
+ vec1D32F& fft,
+ vec1D32F& energy,
+ vec1D32F& analysisMem);
+
+ /**
+ * @brief Applies the window function, in-place, over the given
+ * floating point buffer.
+ * @param[in,out] x Buffer the window will be applied to.
+ **/
+ void ApplyWindow(vec1D32F& x);
+
+ /**
+ * @brief Computes the FFT for a given vector.
+ * @param[in] x Vector to compute the FFT from.
+ * @param[out] fft Floating point FFT vector containing real and
+ * imaginary pairs of elements. NOTE: this vector
+ * does not contain the mirror image (conjugates)
+ * part of the spectrum.
+ **/
+ void ForwardTransform(
+ vec1D32F& x,
+ vec1D32F& fft);
+
+ /**
+ * @brief Computes band energy for each of the 22 Bark scale bands.
+ * @param[in] fft_X FFT spectrum (as computed by ForwardTransform).
+ * @param[out] bandE Vector with 22 elements populated with energy for
+ * each band.
+ **/
+ void ComputeBandEnergy(const vec1D32F& fft_X, vec1D32F& bandE);
+
+ /**
+ * @brief Computes band energy correlation.
+ * @param[in] X FFT vector X.
+ * @param[in] P FFT vector P.
+ * @param[out] bandC Vector with 22 elements populated with band energy
+ * correlation for the two input FFT vectors.
+ **/
+ void ComputeBandCorr(const vec1D32F& X, const vec1D32F& P, vec1D32F& bandC);
+
+ /**
+ * @brief Performs pitch auto-correlation for a given vector for
+ * given lag.
+ * @param[in] x Input vector.
+ * @param[out] ac Auto-correlation output vector.
+ * @param[in] lag Lag value.
+ * @param[in] n Number of elements to consider for correlation
+ * computation.
+ **/
+ void AutoCorr(const vec1D32F &x,
+ vec1D32F &ac,
+ size_t lag,
+ size_t n);
+
+ /**
+ * @brief Computes pitch cross-correlation.
+ * @param[in] x Input vector 1.
+ * @param[in] y Input vector 2.
+ * @param[out] ac Cross-correlation output vector.
+ * @param[in] len Number of elements to consider for correlation.
+ * computation.
+ * @param[in] maxPitch Maximum pitch.
+ **/
+ void PitchXCorr(
+ const vec1D32F& x,
+ const vec1D32F& y,
+ vec1D32F& ac,
+ size_t len,
+ size_t maxPitch);
+
+ /**
+ * @brief Computes "Linear Predictor Coefficients".
+ * @param[in] ac Correlation vector.
+ * @param[in] p Number of elements of input vector to consider.
+ * @param[out] lpc Output coefficients vector.
+ **/
+ void LPC(const vec1D32F& ac, int32_t p, vec1D32F& lpc);
+
+ /**
+ * @brief Custom FIR implementation.
+ * @param[in] num FIR coefficient vector.
+ * @param[in] N Number of elements.
+ * @param[out] x Vector to be be processed.
+ **/
+ void Fir5(const vec1D32F& num, uint32_t N, vec1D32F& x);
+
+ /**
+ * @brief Down-sample the pitch buffer.
+ * @param[in,out] pitchBuf Pitch buffer.
+ * @param[in] pitchBufSz Buffer size.
+ **/
+ void PitchDownsample(vec1D32F& pitchBuf, size_t pitchBufSz);
+
+ /**
+ * @brief Pitch search function.
+ * @param[in] xLP Shifted pitch buffer input.
+ * @param[in] y Pitch buffer input.
+ * @param[in] len Length to search for.
+ * @param[in] maxPitch Maximum pitch.
+ * @return pitch index.
+ **/
+ int PitchSearch(vec1D32F& xLp, vec1D32F& y, uint32_t len, uint32_t maxPitch);
+
+ /**
+ * @brief Finds the "best" pitch from the buffer.
+ * @param[in] xCorr Pitch correlation vector.
+ * @param[in] y Pitch buffer input.
+ * @param[in] len Length to search for.
+ * @param[in] maxPitch Maximum pitch.
+ * @return pitch array (2 elements).
+ **/
+ arrHp FindBestPitch(vec1D32F& xCorr, vec1D32F& y, uint32_t len, uint32_t maxPitch);
+
+ /**
+ * @brief Remove pitch period doubling errors.
+ * @param[in,out] pitchBuf Pitch buffer vector.
+ * @param[in] maxPeriod Maximum period.
+ * @param[in] minPeriod Minimum period.
+ * @param[in] frameSize Frame size.
+ * @param[in] pitchIdx0_ Pitch index 0.
+ * @return pitch index.
+ **/
+ int RemoveDoubling(
+ vec1D32F& pitchBuf,
+ uint32_t maxPeriod,
+ uint32_t minPeriod,
+ uint32_t frameSize,
+ size_t pitchIdx0_);
+
+ /**
+ * @brief Computes pitch gain.
+ * @param[in] xy Single xy cross correlation value.
+ * @param[in] xx Single xx auto correlation value.
+ * @param[in] yy Single yy auto correlation value.
+ * @return Calculated pitch gain.
+ **/
+ float ComputePitchGain(float xy, float xx, float yy);
+
+ /**
+ * @brief Computes DCT vector from the given input.
+ * @param[in] input Input vector.
+ * @param[out] output Output vector with DCT coefficients.
+ **/
+ void DCT(vec1D32F& input, vec1D32F& output);
+
+ /**
+ * @brief Perform inverse fourier transform on complex spectral vector.
+ * @param[out] out Output vector.
+ * @param[in] fftXIn Vector of floats arranged to represent complex numbers interleaved.
+ **/
+ void InverseTransform(vec1D32F& out, vec1D32F& fftXIn);
+
+ /**
+ * @brief Perform pitch filtering.
+ * @param[in] features Object with pre-processing calculated frame features.
+ * @param[in] g Gain values.
+ **/
+ void PitchFilter(FrameFeatures& features, vec1D32F& g);
+
+ /**
+ * @brief Interpolate the band gain values.
+ * @param[out] g Gain values.
+ * @param[in] bandE Vector with 22 elements populated with energy for
+ * each band.
+ **/
+ void InterpBandGain(vec1D32F& g, vec1D32F& bandE);
+
+ /**
+ * @brief Create de-noised frame.
+ * @param[out] outFrame Output vector for storing the created audio frame.
+ * @param[in] fftY Gain adjusted complex spectral vector.
+ */
+ void FrameSynthesis(vec1D32F& outFrame, vec1D32F& fftY);
+
+ /* Private objects */
+ private:
+ FftInstance m_fftInstReal; /* FFT instance for real numbers */
+ FftInstance m_fftInstCmplx; /* FFT instance for complex numbers */
+ vec1D32F m_halfWindow; /* Window coefficients */
+ vec1D32F m_dctTable; /* DCT table */
+ vec1D32F m_analysisMem; /* Buffer used for frame analysis */
+ vec2D32F m_cepstralMem; /* Cepstral coefficients */
+ size_t m_memId; /* memory ID */
+ vec1D32F m_synthesisMem; /* Synthesis mem (used by post-processing) */
+ vec1D32F m_pitchBuf; /* Pitch buffer */
+ float m_lastGain; /* Last gain calculated */
+ int m_lastPeriod; /* Last period calculated */
+ arrHp m_memHpX; /* HpX coefficients. */
+ vec1D32F m_lastGVec; /* Last gain vector (used by post-processing) */
+
+ /* Constants */
+ const std::array <uint32_t, NB_BANDS> m_eband5ms {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12,
+ 14, 16, 20, 24, 28, 34, 40, 48, 60, 78, 100};
+
+ };
+
+
+} /* namespace rnn */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/noise_reduction/include/UseCaseHandler.hpp b/source/use_case/noise_reduction/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..143f2ed
--- /dev/null
+++ b/source/use_case/noise_reduction/include/UseCaseHandler.hpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef NOISE_REDUCTION_EVT_HANDLER_HPP
+#define NOISE_REDUCTION_EVT_HANDLER_HPP
+
+#include "AppContext.hpp"
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief Handles the inference event for noise reduction.
+ * @param[in] ctx pointer to the application context
+ * @param[in] runAll flag to request classification of all the available audio clips
+ * @return True or false based on execution success
+ **/
+ bool NoiseReductionHandler(ApplicationContext& ctx, bool runAll);
+
+ /**
+ * @brief Dumps the output tensors to a memory address.
+ * This functionality is required for RNNoise use case as we want to
+ * save the inference output to a file. Dumping out tensors to a
+ * memory location will allow the Arm FVP or MPS3 to extract the
+ * contents of this memory location to a file. This file could then
+ * be used by an offline post-processing script.
+ *
+ * @param[in] model reference to a model
+ * @param[in] memAddress memory address at which the dump will start
+ * @param[in] memSize maximum size (in bytes) of the dump.
+ *
+ * @return number of bytes written to memory.
+ */
+ size_t DumpOutputTensorsToMemory(Model& model, uint8_t* memAddress,
+ size_t memSize);
+
+ /**
+ * @brief Dumps the audio file header.
+ * This functionality is required for RNNoise use case as we want to
+ * save the inference output to a file. Dumping out the header to a
+ * memory location will allow the Arm FVP or MPS3 to extract the
+ * contents of this memory location to a file.
+ * The header contains the following information
+ * int32_t filenameLength: filename length
+ * uint8_t[] filename: the string containing the file name (without trailing \0)
+ * int32_t dumpSizeByte: audiofile buffer size in bytes
+ *
+ * @param[in] filename the file name
+ * @param[in] dumpSize the size of the audio file (in elements)
+ * @param[in] memAddress memory address at which the dump will start
+ * @param[in] memSize maximum size (in bytes) of the dump.
+ *
+ * @return number of bytes written to memory.
+ */
+ size_t DumpDenoisedAudioHeader(const char* filename, size_t dumpSize,
+ uint8_t* memAddress, size_t memSize);
+
+ /**
+ * @brief Write an EOF marker at the end of the dump memory.
+ *
+ * @param[in] memAddress memory address at which the dump will start
+ * @param[in] memSize maximum size (in bytes) of the dump.
+ *
+ * @return number of bytes written to memory.
+ */
+ size_t DumpDenoisedAudioFooter(uint8_t *memAddress, size_t memSize);
+
+ /**
+ * @brief Dump the audio data to the memory
+ *
+ * @param[in] audioFrame The vector containing the audio data
+ * @param[in] memAddress memory address at which the dump will start
+ * @param[in] memSize maximum size (in bytes) of the dump.
+ *
+ * @return number of bytes written to memory.
+ */
+ size_t DumpOutputDenoisedAudioFrame(const std::vector<int16_t> &audioFrame,
+ uint8_t *memAddress, size_t memSize);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* NOISE_REDUCTION_EVT_HANDLER_HPP */ \ No newline at end of file
diff --git a/source/use_case/noise_reduction/src/MainLoop.cc b/source/use_case/noise_reduction/src/MainLoop.cc
new file mode 100644
index 0000000..ee0a61b
--- /dev/null
+++ b/source/use_case/noise_reduction/src/MainLoop.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h" /* Brings in platform definitions. */
+#include "UseCaseHandler.hpp" /* Handlers for different user options. */
+#include "UseCaseCommonUtils.hpp" /* Utils functions. */
+#include "RNNoiseModel.hpp" /* Model class for running inference. */
+#include "InputFiles.hpp" /* For input audio clips. */
+#include "RNNoiseProcess.hpp" /* Pre-processing class */
+
+enum opcodes
+{
+ MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
+ MENU_OPT_RUN_INF_CHOSEN, /* Run on a user provided vector index. */
+ MENU_OPT_RUN_INF_ALL, /* Run inference on all. */
+ MENU_OPT_SHOW_MODEL_INFO, /* Show model info. */
+ MENU_OPT_LIST_AUDIO_CLIPS /* List the current baked audio clip features. */
+};
+
+static void DisplayMenu()
+{
+ printf("\n\n");
+ printf("User input required\n");
+ printf("Enter option number from:\n\n");
+ printf(" %u. Run noise reduction on the next WAV\n", MENU_OPT_RUN_INF_NEXT);
+ printf(" %u. Run noise reduction on a WAV at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
+ printf(" %u. Run noise reduction on all WAVs\n", MENU_OPT_RUN_INF_ALL);
+ printf(" %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
+ printf(" %u. List audio clips\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
+ printf(" Choice: ");
+ fflush(stdout);
+}
+
+static bool SetAppCtxClipIdx(arm::app::ApplicationContext& ctx, uint32_t idx)
+{
+ if (idx >= NUMBER_OF_FILES) {
+ printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
+ idx, NUMBER_OF_FILES);
+ return false;
+ }
+ ctx.Set<uint32_t>("clipIndex", idx);
+ return true;
+}
+
+void main_loop(hal_platform& platform)
+{
+ arm::app::RNNoiseModel model; /* Model wrapper object. */
+
+ bool executionSuccessful = true;
+ constexpr bool bUseMenu = NUMBER_OF_FILES > 1 ? true : false;
+
+ /* Load the model. */
+ if (!model.Init()) {
+ printf_err("Failed to initialise model\n");
+ return;
+ }
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+
+ arm::app::Profiler profiler{&platform, "noise_reduction"};
+ caseContext.Set<arm::app::Profiler&>("profiler", profiler);
+
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
+ caseContext.Set<uint32_t>("frameLength", g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", g_FrameStride);
+ caseContext.Set<arm::app::RNNoiseModel&>("model", model);
+ SetAppCtxClipIdx(caseContext, 0);
+
+#if defined(MEM_DUMP_BASE_ADDR) && defined(MPS3_PLATFORM)
+ /* For this use case, for valid targets, we dump contents
+ * of the output tensor to a certain location in memory to
+ * allow offline tools to pick this data up. */
+ constexpr size_t memDumpMaxLen = MEM_DUMP_LEN;
+ uint8_t* memDumpBaseAddr = reinterpret_cast<uint8_t *>(MEM_DUMP_BASE_ADDR);
+ size_t memDumpBytesWritten = 0;
+ caseContext.Set<size_t>("MEM_DUMP_LEN", memDumpMaxLen);
+ caseContext.Set<uint8_t*>("MEM_DUMP_BASE_ADDR", memDumpBaseAddr);
+ caseContext.Set<size_t*>("MEM_DUMP_BYTE_WRITTEN", &memDumpBytesWritten);
+#endif /* defined(MEM_DUMP_BASE_ADDR) && defined(MPS3_PLATFORM) */
+ /* Loop. */
+ do {
+ int menuOption = MENU_OPT_RUN_INF_NEXT;
+
+ if (bUseMenu) {
+ DisplayMenu();
+ menuOption = arm::app::ReadUserInputAsInt(platform);
+ printf("\n");
+ }
+ switch (menuOption) {
+ case MENU_OPT_RUN_INF_NEXT:
+ executionSuccessful = NoiseReductionHandler(caseContext, false);
+ break;
+ case MENU_OPT_RUN_INF_CHOSEN: {
+ printf(" Enter the audio clip IFM index [0, %d]: ", NUMBER_OF_FILES-1);
+ auto clipIndex = static_cast<uint32_t>(arm::app::ReadUserInputAsInt(platform));
+ SetAppCtxClipIdx(caseContext, clipIndex);
+ executionSuccessful = NoiseReductionHandler(caseContext, false);
+ break;
+ }
+ case MENU_OPT_RUN_INF_ALL:
+ executionSuccessful = NoiseReductionHandler(caseContext, true);
+ break;
+ case MENU_OPT_SHOW_MODEL_INFO:
+ executionSuccessful = model.ShowModelInfoHandler();
+ break;
+ case MENU_OPT_LIST_AUDIO_CLIPS:
+ executionSuccessful = ListFilesHandler(caseContext);
+ break;
+ default:
+ printf("Incorrect choice, try again.");
+ break;
+ }
+ } while (executionSuccessful && bUseMenu);
+ info("Main loop terminated.\n");
+} \ No newline at end of file
diff --git a/source/use_case/noise_reduction/src/RNNoiseModel.cc b/source/use_case/noise_reduction/src/RNNoiseModel.cc
new file mode 100644
index 0000000..be0f369
--- /dev/null
+++ b/source/use_case/noise_reduction/src/RNNoiseModel.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RNNoiseModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::RNNoiseModel::GetOpResolver()
+{
+ return this->m_opResolver;
+}
+
+bool arm::app::RNNoiseModel::EnlistOperations()
+{
+ this->m_opResolver.AddUnpack();
+ this->m_opResolver.AddFullyConnected();
+ this->m_opResolver.AddSplit();
+ this->m_opResolver.AddSplitV();
+ this->m_opResolver.AddAdd();
+ this->m_opResolver.AddLogistic();
+ this->m_opResolver.AddMul();
+ this->m_opResolver.AddSub();
+ this->m_opResolver.AddTanh();
+ this->m_opResolver.AddPack();
+ this->m_opResolver.AddReshape();
+ this->m_opResolver.AddQuantize();
+ this->m_opResolver.AddConcatenation();
+ this->m_opResolver.AddRelu();
+
+#if defined(ARM_NPU)
+ if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
+ info("Added %s support to op resolver\n",
+ tflite::GetString_ETHOSU());
+ } else {
+ printf_err("Failed to add Arm NPU support to op resolver.");
+ return false;
+ }
+#endif /* ARM_NPU */
+ return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::RNNoiseModel::ModelPointer()
+{
+ return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::RNNoiseModel::ModelSize()
+{
+ return GetModelLen();
+}
+
+bool arm::app::RNNoiseModel::RunInference()
+{
+ return Model::RunInference();
+}
+
+void arm::app::RNNoiseModel::ResetGruState()
+{
+ for (auto& stateMapping: this->m_gruStateMap) {
+ TfLiteTensor* inputGruStateTensor = this->GetInputTensor(stateMapping.second);
+ auto* inputGruState = tflite::GetTensorData<int8_t>(inputGruStateTensor);
+ /* Initial value of states is 0, but this is affected by quantization zero point. */
+ auto quantParams = arm::app::GetTensorQuantParams(inputGruStateTensor);
+ memset(inputGruState, quantParams.offset, inputGruStateTensor->bytes);
+ }
+}
+
+bool arm::app::RNNoiseModel::CopyGruStates()
+{
+ std::vector<std::pair<size_t, std::vector<int8_t>>> tempOutGruStates;
+ /* Saving output states before copying them to input states to avoid output states modification in the tensor.
+ * tflu shares input and output tensors memory, thus writing to input tensor can change output tensor values. */
+ for (auto& stateMapping: this->m_gruStateMap) {
+ TfLiteTensor* outputGruStateTensor = this->GetOutputTensor(stateMapping.first);
+ std::vector<int8_t> tempOutGruState(outputGruStateTensor->bytes);
+ auto* outGruState = tflite::GetTensorData<int8_t>(outputGruStateTensor);
+ memcpy(tempOutGruState.data(), outGruState, outputGruStateTensor->bytes);
+ /* Index of the input tensor and the data to copy. */
+ tempOutGruStates.emplace_back(stateMapping.second, std::move(tempOutGruState));
+ }
+ /* Updating input GRU states with saved GRU output states. */
+ for (auto& stateMapping: tempOutGruStates) {
+ auto outputGruStateTensorData = stateMapping.second;
+ TfLiteTensor* inputGruStateTensor = this->GetInputTensor(stateMapping.first);
+ if (outputGruStateTensorData.size() != inputGruStateTensor->bytes) {
+ printf_err("Unexpected number of bytes for GRU state mapping. Input = %zu, output = %zu.\n",
+ inputGruStateTensor->bytes,
+ outputGruStateTensorData.size());
+ return false;
+ }
+ auto* inputGruState = tflite::GetTensorData<int8_t>(inputGruStateTensor);
+ auto* outGruState = outputGruStateTensorData.data();
+ memcpy(inputGruState, outGruState, inputGruStateTensor->bytes);
+ }
+ return true;
+} \ No newline at end of file
diff --git a/source/use_case/noise_reduction/src/RNNoiseProcess.cc b/source/use_case/noise_reduction/src/RNNoiseProcess.cc
new file mode 100644
index 0000000..d9a7b35
--- /dev/null
+++ b/source/use_case/noise_reduction/src/RNNoiseProcess.cc
@@ -0,0 +1,888 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RNNoiseProcess.hpp"
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+namespace arm {
+namespace app {
+namespace rnn {
+
/* Lightweight runtime assertion for this translation unit: logs the failed
 * condition via printf_err and terminates the application. Used instead of
 * <cassert> so failures are reported through the platform logging channel. */
#define VERIFY(x) \
do { \
    if (!(x)) { \
        printf_err("Assert failed:" #x "\n"); \
        exit(1); \
    } \
} while(0)
+
/* Sets up all state that persists across frames: overlap buffers for
 * analysis/synthesis, the pitch buffer, the cepstral history, the FFT
 * instances and the window/DCT look-up tables. */
RNNoiseProcess::RNNoiseProcess() :
    m_halfWindow(FRAME_SIZE, 0),                     /* Half of the (symmetric) analysis/synthesis window. */
    m_dctTable(NB_BANDS * NB_BANDS),                 /* DCT-II basis table, filled by InitTables(). */
    m_analysisMem(FRAME_SIZE, 0),                    /* Overlap memory consumed by FrameAnalysis(). */
    m_cepstralMem(CEPS_MEM, vec1D32F(NB_BANDS, 0)),  /* Circular history of cepstral frames. */
    m_memId{0},                                      /* Write index into m_cepstralMem. */
    m_synthesisMem(FRAME_SIZE, 0),                   /* Overlap-add memory for FrameSynthesis(). */
    m_pitchBuf(PITCH_BUF_SIZE, 0),                   /* Rolling buffer used by the pitch analysis. */
    m_lastGain{0.0},                                 /* Pitch gain carried over from the previous frame. */
    m_lastPeriod{0},                                 /* Pitch period carried over from the previous frame. */
    m_memHpX{},                                      /* High-pass biquad filter state. */
    m_lastGVec(NB_BANDS, 0)                          /* Band gains from the previous frame (for smoothing). */
{
    constexpr uint32_t numFFt = 2 * FRAME_SIZE;
    static_assert(numFFt != 0, "Num FFT can't be 0");

    /* One real FFT for analysis, one complex FFT used by InverseTransform(). */
    math::MathUtils::FftInitF32(numFFt, this->m_fftInstReal, FftType::real);
    math::MathUtils::FftInitF32(numFFt, this->m_fftInstCmplx, FftType::complex);
    this->InitTables();
}
+
+void RNNoiseProcess::PreprocessFrame(const float* audioData,
+ const size_t audioLen,
+ FrameFeatures& features)
+{
+ /* Note audioWindow is modified in place */
+ const arrHp aHp {-1.99599, 0.99600 };
+ const arrHp bHp {-2.00000, 1.00000 };
+
+ vec1D32F audioWindow{audioData, audioData + audioLen};
+
+ this->BiQuad(bHp, aHp, this->m_memHpX, audioWindow);
+ this->ComputeFrameFeatures(audioWindow, features);
+}
+
+void RNNoiseProcess::PostProcessFrame(vec1D32F& modelOutput, FrameFeatures& features, vec1D32F& outFrame)
+{
+ std::vector<float> g = modelOutput; /* Gain values. */
+ std::vector<float> gf(FREQ_SIZE, 0);
+
+ if (!features.m_silence) {
+ PitchFilter(features, g);
+ for (size_t i = 0; i < NB_BANDS; i++) {
+ float alpha = .6f;
+ g[i] = std::max(g[i], alpha * m_lastGVec[i]);
+ m_lastGVec[i] = g[i];
+ }
+ InterpBandGain(gf, g);
+ for (size_t i = 0; i < FREQ_SIZE; i++) {
+ features.m_fftX[2 * i] *= gf[i]; /* Real. */
+ features.m_fftX[2 * i + 1] *= gf[i]; /*imaginary. */
+
+ }
+
+ }
+
+ FrameSynthesis(outFrame, features.m_fftX);
+}
+
+void RNNoiseProcess::InitTables()
+{
+ constexpr float pi = M_PI;
+ constexpr float halfPi = M_PI / 2;
+ constexpr float halfPiOverFrameSz = halfPi/FRAME_SIZE;
+
+ for (uint32_t i = 0; i < FRAME_SIZE; i++) {
+ const float sinVal = math::MathUtils::SineF32(halfPiOverFrameSz * (i + 0.5f));
+ m_halfWindow[i] = math::MathUtils::SineF32(halfPi * sinVal * sinVal);
+ }
+
+ for (uint32_t i = 0; i < NB_BANDS; i++) {
+ for (uint32_t j = 0; j < NB_BANDS; j++) {
+ m_dctTable[i * NB_BANDS + j] = math::MathUtils::CosineF32((i + 0.5f) * j * pi / NB_BANDS);
+ }
+ m_dctTable[i * NB_BANDS] *= math::MathUtils::SqrtF32(0.5f);
+ }
+}
+
+void RNNoiseProcess::BiQuad(
+ const arrHp& bHp,
+ const arrHp& aHp,
+ arrHp& memHpX,
+ vec1D32F& audioWindow)
+{
+ for (float& audioElement : audioWindow) {
+ const auto xi = audioElement;
+ const auto yi = audioElement + memHpX[0];
+ memHpX[0] = memHpX[1] + (bHp[0] * xi - aHp[0] * yi);
+ memHpX[1] = (bHp[1] * xi - aHp[1] * yi);
+ audioElement = yi;
+ }
+}
+
/* Extracts the full RNNoise feature vector for one audio frame:
 * band energies, pitch-correlation cepstrum, delta cepstra, pitch period/
 * gain features and a spectral-variability feature. Also updates the rolling
 * pitch buffer and cepstral history. Sets features.m_silence and returns
 * early (leaving the rest of the feature vector untouched) on silence. */
void RNNoiseProcess::ComputeFrameFeatures(vec1D32F& audioWindow,
                                          FrameFeatures& features)
{
    /* Windowed FFT and per-band energy of the current frame. */
    this->FrameAnalysis(audioWindow,
                        features.m_fftX,
                        features.m_Ex,
                        this->m_analysisMem);

    float E = 0.0;

    vec1D32F Ly(NB_BANDS, 0);
    vec1D32F p(WINDOW_SIZE, 0);
    vec1D32F pitchBuf(PITCH_BUF_SIZE >> 1, 0);

    /* Shift the pitch buffer left by one frame and append the new samples.
     * NOTE(review): this VERIFY looks inverted - the copies below require
     * m_pitchBuf.size() >= PITCH_BUF_SIZE; both hold only because the buffer
     * is constructed with exactly PITCH_BUF_SIZE elements. Confirm intent. */
    VERIFY(PITCH_BUF_SIZE >= this->m_pitchBuf.size());
    std::copy_n(this->m_pitchBuf.begin() + FRAME_SIZE,
                PITCH_BUF_SIZE - FRAME_SIZE,
                this->m_pitchBuf.begin());

    VERIFY(FRAME_SIZE <= audioWindow.size() && PITCH_BUF_SIZE > FRAME_SIZE);
    std::copy_n(audioWindow.begin(),
                FRAME_SIZE,
                this->m_pitchBuf.begin() + PITCH_BUF_SIZE - FRAME_SIZE);

    /* 2x-decimated, LPC-whitened copy of the pitch buffer. */
    this->PitchDownsample(pitchBuf, PITCH_BUF_SIZE);

    VERIFY(pitchBuf.size() > PITCH_MAX_PERIOD/2);
    vec1D32F xLp(pitchBuf.size() - PITCH_MAX_PERIOD/2, 0);
    std::copy_n(pitchBuf.begin() + PITCH_MAX_PERIOD/2, xLp.size(), xLp.begin());

    /* Coarse-to-fine pitch search, then octave-error correction. */
    int pitchIdx = this->PitchSearch(xLp, pitchBuf,
            PITCH_FRAME_SIZE, (PITCH_MAX_PERIOD - (3*PITCH_MIN_PERIOD)));

    pitchIdx = this->RemoveDoubling(
            pitchBuf,
            PITCH_MAX_PERIOD,
            PITCH_MIN_PERIOD,
            PITCH_FRAME_SIZE,
            PITCH_MAX_PERIOD - pitchIdx);

    /* Window of the pitch buffer delayed by the detected period. */
    size_t stIdx = PITCH_BUF_SIZE - WINDOW_SIZE - pitchIdx;
    VERIFY((static_cast<int>(PITCH_BUF_SIZE) - static_cast<int>(WINDOW_SIZE) - pitchIdx) >= 0);
    std::copy_n(this->m_pitchBuf.begin() + stIdx, WINDOW_SIZE, p.begin());

    this->ApplyWindow(p);
    this->ForwardTransform(p, features.m_fftP);
    this->ComputeBandEnergy(features.m_fftP, features.m_Ep);
    this->ComputeBandCorr(features.m_fftX, features.m_fftP, features.m_Exp);

    /* Normalise the band correlation by the geometric mean of the energies. */
    for (uint32_t i = 0 ; i < NB_BANDS; ++i) {
        features.m_Exp[i] /= math::MathUtils::SqrtF32(
            0.001f + features.m_Ex[i] * features.m_Ep[i]);
    }

    /* Cepstrum of the pitch correlation -> pitch-related features. */
    vec1D32F dctVec(NB_BANDS, 0);
    this->DCT(features.m_Exp, dctVec);

    features.m_featuresVec = vec1D32F (NB_FEATURES, 0);
    for (uint32_t i = 0; i < NB_DELTA_CEPS; ++i) {
        features.m_featuresVec[NB_BANDS + 2*NB_DELTA_CEPS + i] = dctVec[i];
    }

    /* Fixed offsets centre these features; pitch index scaled around 300. */
    features.m_featuresVec[NB_BANDS + 2*NB_DELTA_CEPS] -= 1.3;
    features.m_featuresVec[NB_BANDS + 2*NB_DELTA_CEPS + 1] -= 0.9;
    features.m_featuresVec[NB_BANDS + 3*NB_DELTA_CEPS] = 0.01 * (static_cast<int>(pitchIdx) - 300);

    /* Log band energies with a floor that follows the frame's loudest band. */
    float logMax = -2.f;
    float follow = -2.f;
    for (uint32_t i = 0; i < NB_BANDS; ++i) {
        Ly[i] = log10f(1e-2f + features.m_Ex[i]);
        Ly[i] = std::max<float>(logMax - 7, std::max<float>(follow - 1.5, Ly[i]));
        logMax = std::max<float>(logMax, Ly[i]);
        follow = std::max<float>(follow - 1.5, Ly[i]);
        E += features.m_Ex[i];
    }

    /* If there's no audio avoid messing up the state. */
    features.m_silence = true;
    if (E < 0.04) {
        return;
    } else {
        features.m_silence = false;
    }

    /* Cepstrum of the log band energies; first coefficients re-centred. */
    this->DCT(Ly, features.m_featuresVec);
    features.m_featuresVec[0] -= 12.0;
    features.m_featuresVec[1] -= 4.0;

    /* Indices of the two previous frames in the circular cepstral history. */
    VERIFY(CEPS_MEM > 2);
    uint32_t stIdx1 = this->m_memId < 1 ? CEPS_MEM + this->m_memId - 1 : this->m_memId - 1;
    uint32_t stIdx2 = this->m_memId < 2 ? CEPS_MEM + this->m_memId - 2 : this->m_memId - 2;

    /* Copies are taken before the current slot is overwritten below. */
    auto ceps1 = this->m_cepstralMem[stIdx1];
    auto ceps2 = this->m_cepstralMem[stIdx2];

    /* Ceps 0 */
    for (uint32_t i = 0; i < NB_BANDS; ++i) {
        this->m_cepstralMem[this->m_memId][i] = features.m_featuresVec[i];
    }

    /* Smoothed cepstrum plus first and second temporal deltas. */
    for (uint32_t i = 0; i < NB_DELTA_CEPS; ++i) {
        features.m_featuresVec[i] = this->m_cepstralMem[this->m_memId][i] + ceps1[i] + ceps2[i];
        features.m_featuresVec[NB_BANDS + i] = this->m_cepstralMem[this->m_memId][i] - ceps2[i];
        features.m_featuresVec[NB_BANDS + NB_DELTA_CEPS + i] =
            this->m_cepstralMem[this->m_memId][i] - 2 * ceps1[i] + ceps2[i];
    }

    /* Spectral variability features. */
    this->m_memId += 1;
    if (this->m_memId == CEPS_MEM) {
        this->m_memId = 0;
    }

    float specVariability = 0.f;

    /* Sum over history frames of the distance to each frame's nearest neighbour. */
    VERIFY(this->m_cepstralMem.size() >= CEPS_MEM);
    for (size_t i = 0; i < CEPS_MEM; ++i) {
        float minDist = 1e15;
        for (size_t j = 0; j < CEPS_MEM; ++j) {
            float dist = 0.f;
            for (size_t k = 0; k < NB_BANDS; ++k) {
                VERIFY(this->m_cepstralMem[i].size() >= NB_BANDS);
                auto tmp = this->m_cepstralMem[i][k] - this->m_cepstralMem[j][k];
                dist += tmp * tmp;
            }

            if (j != i) {
                minDist = std::min<float>(minDist, dist);
            }
        }
        specVariability += minDist;
    }

    VERIFY(features.m_featuresVec.size() >= NB_BANDS + 3 * NB_DELTA_CEPS + 1);
    features.m_featuresVec[NB_BANDS + 3 * NB_DELTA_CEPS + 1] = specVariability / CEPS_MEM - 2.1;
}
+
+void RNNoiseProcess::FrameAnalysis(
+ const vec1D32F& audioWindow,
+ vec1D32F& fft,
+ vec1D32F& energy,
+ vec1D32F& analysisMem)
+{
+ vec1D32F x(WINDOW_SIZE, 0);
+
+ /* Move old audio down and populate end with latest audio window. */
+ VERIFY(x.size() >= FRAME_SIZE && analysisMem.size() >= FRAME_SIZE);
+ VERIFY(audioWindow.size() >= FRAME_SIZE);
+
+ std::copy_n(analysisMem.begin(), FRAME_SIZE, x.begin());
+ std::copy_n(audioWindow.begin(), x.size() - FRAME_SIZE, x.begin() + FRAME_SIZE);
+ std::copy_n(audioWindow.begin(), FRAME_SIZE, analysisMem.begin());
+
+ this->ApplyWindow(x);
+
+ /* Calculate FFT. */
+ ForwardTransform(x, fft);
+
+ /* Compute band energy. */
+ ComputeBandEnergy(fft, energy);
+}
+
+void RNNoiseProcess::ApplyWindow(vec1D32F& x)
+{
+ if (WINDOW_SIZE != x.size()) {
+ printf_err("Invalid size for vector to be windowed\n");
+ return;
+ }
+
+ VERIFY(this->m_halfWindow.size() >= FRAME_SIZE);
+
+ /* Multiply input by sinusoidal function. */
+ for (size_t i = 0; i < FRAME_SIZE; i++) {
+ x[i] *= this->m_halfWindow[i];
+ x[WINDOW_SIZE - 1 - i] *= this->m_halfWindow[i];
+ }
+}
+
+void RNNoiseProcess::ForwardTransform(
+ vec1D32F& x,
+ vec1D32F& fft)
+{
+ /* The input vector can be modified by the fft function. */
+ fft.reserve(x.size() + 2);
+ fft.resize(x.size() + 2, 0);
+ math::MathUtils::FftF32(x, fft, this->m_fftInstReal);
+
+ /* Normalise. */
+ for (auto& f : fft) {
+ f /= this->m_fftInstReal.m_fftLen;
+ }
+
+ /* Place the last freq element correctly */
+ fft[fft.size()-2] = fft[1];
+ fft[1] = 0;
+
+ /* NOTE: We don't truncate out FFT vector as it already contains only the
+ * first half of the FFT's. The conjugates are not present. */
+}
+
+void RNNoiseProcess::ComputeBandEnergy(const vec1D32F& fftX, vec1D32F& bandE)
+{
+ bandE = vec1D32F(NB_BANDS, 0);
+
+ VERIFY(this->m_eband5ms.size() >= NB_BANDS);
+ for (uint32_t i = 0; i < NB_BANDS - 1; i++) {
+ const auto bandSize = (this->m_eband5ms[i + 1] - this->m_eband5ms[i])
+ << FRAME_SIZE_SHIFT;
+
+ for (uint32_t j = 0; j < bandSize; j++) {
+ const auto frac = static_cast<float>(j) / bandSize;
+ const auto idx = (this->m_eband5ms[i] << FRAME_SIZE_SHIFT) + j;
+
+ auto tmp = fftX[2 * idx] * fftX[2 * idx]; /* Real part */
+ tmp += fftX[2 * idx + 1] * fftX[2 * idx + 1]; /* Imaginary part */
+
+ bandE[i] += (1 - frac) * tmp;
+ bandE[i + 1] += frac * tmp;
+ }
+ }
+ bandE[0] *= 2;
+ bandE[NB_BANDS - 1] *= 2;
+}
+
+void RNNoiseProcess::ComputeBandCorr(const vec1D32F& X, const vec1D32F& P, vec1D32F& bandC)
+{
+ bandC = vec1D32F(NB_BANDS, 0);
+ VERIFY(this->m_eband5ms.size() >= NB_BANDS);
+
+ for (uint32_t i = 0; i < NB_BANDS - 1; i++) {
+ const auto bandSize = (this->m_eband5ms[i + 1] - this->m_eband5ms[i]) << FRAME_SIZE_SHIFT;
+
+ for (uint32_t j = 0; j < bandSize; j++) {
+ const auto frac = static_cast<float>(j) / bandSize;
+ const auto idx = (this->m_eband5ms[i] << FRAME_SIZE_SHIFT) + j;
+
+ auto tmp = X[2 * idx] * P[2 * idx]; /* Real part */
+ tmp += X[2 * idx + 1] * P[2 * idx + 1]; /* Imaginary part */
+
+ bandC[i] += (1 - frac) * tmp;
+ bandC[i + 1] += frac * tmp;
+ }
+ }
+ bandC[0] *= 2;
+ bandC[NB_BANDS - 1] *= 2;
+}
+
+void RNNoiseProcess::DCT(vec1D32F& input, vec1D32F& output)
+{
+ VERIFY(this->m_dctTable.size() >= NB_BANDS * NB_BANDS);
+ for (uint32_t i = 0; i < NB_BANDS; ++i) {
+ float sum = 0;
+
+ for (uint32_t j = 0, k = 0; j < NB_BANDS; ++j, k += NB_BANDS) {
+ sum += input[j] * this->m_dctTable[k + i];
+ }
+ output[i] = sum * math::MathUtils::SqrtF32(2.0/22);
+ }
+}
+
+void RNNoiseProcess::PitchDownsample(vec1D32F& pitchBuf, size_t pitchBufSz) {
+ for (size_t i = 1; i < (pitchBufSz >> 1); ++i) {
+ pitchBuf[i] = 0.5 * (
+ 0.5 * (this->m_pitchBuf[2 * i - 1] + this->m_pitchBuf[2 * i + 1])
+ + this->m_pitchBuf[2 * i]);
+ }
+
+ pitchBuf[0] = 0.5*(0.5*(this->m_pitchBuf[1]) + this->m_pitchBuf[0]);
+
+ vec1D32F ac(5, 0);
+ size_t numLags = 4;
+
+ this->AutoCorr(pitchBuf, ac, numLags, pitchBufSz >> 1);
+
+ /* Noise floor -40db */
+ ac[0] *= 1.0001;
+
+ /* Lag windowing. */
+ for (size_t i = 1; i < numLags + 1; ++i) {
+ ac[i] -= ac[i] * (0.008 * i) * (0.008 * i);
+ }
+
+ vec1D32F lpc(numLags, 0);
+ this->LPC(ac, numLags, lpc);
+
+ float tmp = 1.0;
+ for (size_t i = 0; i < numLags; ++i) {
+ tmp = 0.9f * tmp;
+ lpc[i] = lpc[i] * tmp;
+ }
+
+ vec1D32F lpc2(numLags + 1, 0);
+ float c1 = 0.8;
+
+ /* Add a zero. */
+ lpc2[0] = lpc[0] + 0.8;
+ lpc2[1] = lpc[1] + (c1 * lpc[0]);
+ lpc2[2] = lpc[2] + (c1 * lpc[1]);
+ lpc2[3] = lpc[3] + (c1 * lpc[2]);
+ lpc2[4] = (c1 * lpc[3]);
+
+ this->Fir5(lpc2, pitchBufSz >> 1, pitchBuf);
+}
+
/* Coarse-to-fine pitch period search over the (already 2x-decimated)
 * buffers: a first pass at 4x decimation selects two candidate lags, a
 * second pass at 2x decimation re-scores them, and the winner is refined
 * by pseudo-quadratic interpolation. Returns the lag at 1x-of-input scale
 * (i.e. 2 * best 2x-decimated lag, +/- 1). */
int RNNoiseProcess::PitchSearch(vec1D32F& xLp, vec1D32F& y, uint32_t len, uint32_t maxPitch) {
    uint32_t lag = len + maxPitch;
    vec1D32F xLp4(len >> 2, 0);
    vec1D32F yLp4(lag >> 2, 0);
    vec1D32F xCorr(maxPitch >> 1, 0);

    /* Downsample by 2 again. */
    for (size_t j = 0; j < (len >> 2); ++j) {
        xLp4[j] = xLp[2*j];
    }
    for (size_t j = 0; j < (lag >> 2); ++j) {
        yLp4[j] = y[2*j];
    }

    this->PitchXCorr(xLp4, yLp4, xCorr, len >> 2, maxPitch >> 2);

    /* Coarse search with 4x decimation.
     * NOTE(review): arrHp holds floats, so the candidate lags below are
     * carried as floats and implicitly converted when used as indices. */
    arrHp bestPitch = this->FindBestPitch(xCorr, yLp4, len >> 2, maxPitch >> 2);

    /* Finer search with 2x decimation: only re-score lags within +/-2 of
     * (twice) either coarse candidate. */
    const int maxIdx = (maxPitch >> 1);
    for (int i = 0; i < maxIdx; ++i) {
        xCorr[i] = 0;
        if (std::abs(i - 2*bestPitch[0]) > 2 and std::abs(i - 2*bestPitch[1]) > 2) {
            continue;
        }
        float sum = 0;
        for (size_t j = 0; j < len >> 1; ++j) {
            sum += xLp[j] * y[i+j];
        }

        xCorr[i] = std::max(-1.0f, sum);
    }

    bestPitch = this->FindBestPitch(xCorr, y, len >> 1, maxPitch >> 1);

    int offset;
    /* Refine by pseudo-interpolation over the winning lag's neighbours. */
    if ( 0 < bestPitch[0] && bestPitch[0] < ((maxPitch >> 1) - 1)) {
        float a = xCorr[bestPitch[0] - 1];
        float b = xCorr[bestPitch[0]];
        float c = xCorr[bestPitch[0] + 1];

        if ( (c-a) > 0.7*(b-a) ) {
            offset = 1;
        } else if ( (a-c) > 0.7*(b-c) ) {
            offset = -1;
        } else {
            offset = 0;
        }
    } else {
        offset = 0;
    }

    return 2*bestPitch[0] - offset;
}
+
/* Scans the cross-correlation for the two best pitch candidates, ranking
 * lags by squared correlation normalised by the energy (Syy) of the
 * corresponding window of y. Returns {best, second-best} lag indices
 * (stored as floats, since arrHp is a float pair). */
arrHp RNNoiseProcess::FindBestPitch(vec1D32F& xCorr, vec1D32F& y, uint32_t len, uint32_t maxPitch)
{
    float Syy = 1;           /* Running window energy; starts at 1 to avoid division by zero. */
    arrHp bestNum {-1, -1};  /* Numerators (squared correlations) of the two best candidates. */
    arrHp bestDen {0, 0};    /* Matching denominators (window energies). */
    arrHp bestPitch {0, 1};  /* Lag indices of the two best candidates. */

    for (size_t j = 0; j < len; ++j) {
        Syy += (y[j] * y[j]);
    }

    for (size_t i = 0; i < maxPitch; ++i ) {
        if (xCorr[i] > 0) {
            float xCorr16 = xCorr[i] * 1e-12f; /* Avoid problems when squaring. */

            float num = xCorr16 * xCorr16;
            /* Cross-multiplied comparisons of num/Syy against the stored
             * best ratios, avoiding divisions. */
            if (num*bestDen[1] > bestNum[1]*Syy) {
                if (num*bestDen[0] > bestNum[0]*Syy) {
                    /* New overall best: demote the previous best to second. */
                    bestNum[1] = bestNum[0];
                    bestDen[1] = bestDen[0];
                    bestPitch[1] = bestPitch[0];
                    bestNum[0] = num;
                    bestDen[0] = Syy;
                    bestPitch[0] = i;
                } else {
                    bestNum[1] = num;
                    bestDen[1] = Syy;
                    bestPitch[1] = i;
                }
            }
        }

        /* Slide the energy window one sample forward; clamp at 1. */
        Syy += (y[i+len]*y[i+len]) - (y[i]*y[i]);
        Syy = std::max(1.0f, Syy);
    }

    return bestPitch;
}
+
/* Octave-error correction (port of CELT's remove_doubling): checks whether
 * a sub-multiple of the detected period (period/k for k = 2..15) explains
 * the signal at least as well, biased by continuity with the previous
 * frame's period/gain. Updates m_lastPeriod/m_lastGain and returns the
 * corrected period at the caller's scale. */
int RNNoiseProcess::RemoveDoubling(
    vec1D32F& pitchBuf,
    uint32_t maxPeriod,
    uint32_t minPeriod,
    uint32_t frameSize,
    size_t pitchIdx0_)
{
    /* secondCheck[k] selects the extra sub-multiple lag verified for each k. */
    constexpr std::array<size_t, 16> secondCheck {0, 0, 3, 2, 3, 2, 5, 2, 3, 2, 3, 2, 5, 2, 3, 2};
    uint32_t minPeriod0 = minPeriod;
    float lastPeriod = static_cast<float>(this->m_lastPeriod)/2;
    float lastGain = static_cast<float>(this->m_lastGain);

    /* Everything below operates at half resolution (2x decimated). */
    maxPeriod /= 2;
    minPeriod /= 2;
    pitchIdx0_ /= 2;
    frameSize /= 2;
    uint32_t xStart = maxPeriod;

    if (pitchIdx0_ >= maxPeriod) {
        pitchIdx0_ = maxPeriod - 1;
    }

    size_t pitchIdx = pitchIdx0_;
    size_t pitchIdx0 = pitchIdx0_;

    /* Energy of the analysis frame (xx) and its correlation with the frame
     * delayed by the candidate period (xy). */
    float xx = 0;
    for ( size_t i = xStart; i < xStart+frameSize; ++i) {
        xx += (pitchBuf[i] * pitchBuf[i]);
    }

    float xy = 0;
    for ( size_t i = xStart; i < xStart+frameSize; ++i) {
        xy += (pitchBuf[i] * pitchBuf[i-pitchIdx0]);
    }

    /* yyLookup[T] = energy of the frame delayed by T, built incrementally. */
    vec1D32F yyLookup (maxPeriod+1, 0);
    yyLookup[0] = xx;
    float yy = xx;

    for ( size_t i = 1; i < maxPeriod+1; ++i) {
        yy = yy + (pitchBuf[xStart-i] * pitchBuf[xStart-i]) -
             (pitchBuf[xStart+frameSize-i] * pitchBuf[xStart+frameSize-i]);
        yyLookup[i] = std::max(0.0f, yy);
    }

    yy = yyLookup[pitchIdx0];
    float bestXy = xy;
    float bestYy = yy;

    /* Gain of the original candidate. */
    float g = this->ComputePitchGain(xy, xx, yy);
    float g0 = g;

    /* Look for any pitch at pitchIndex/k. */
    for ( size_t k = 2; k < 16; ++k) {
        size_t pitchIdx1 = (2*pitchIdx0+k) / (2*k);  /* Rounded pitchIdx0/k. */
        if (pitchIdx1 < minPeriod) {
            break;
        }

        size_t pitchIdx1b;
        /* Look for another strong correlation at T1b. */
        if (k == 2) {
            if ((pitchIdx1 + pitchIdx0) > maxPeriod) {
                pitchIdx1b = pitchIdx0;
            } else {
                pitchIdx1b = pitchIdx0 + pitchIdx1;
            }
        } else {
            pitchIdx1b = (2*(secondCheck[k])*pitchIdx0 + k) / (2*k);
        }

        xy = 0;
        for ( size_t i = xStart; i < xStart+frameSize; ++i) {
            xy += (pitchBuf[i] * pitchBuf[i-pitchIdx1]);
        }

        float xy2 = 0;
        for ( size_t i = xStart; i < xStart+frameSize; ++i) {
            xy2 += (pitchBuf[i] * pitchBuf[i-pitchIdx1b]);
        }
        /* Average the two lags' statistics before computing the gain. */
        xy = 0.5f * (xy + xy2);
        yy = 0.5f * (yyLookup[pitchIdx1] + yyLookup[pitchIdx1b]);

        float g1 = this->ComputePitchGain(xy, xx, yy);

        /* Continuity bonus when the candidate matches last frame's period. */
        float cont;
        if (std::abs(pitchIdx1-lastPeriod) <= 1) {
            cont = lastGain;
        } else if (std::abs(pitchIdx1-lastPeriod) <= 2 and 5*k*k < pitchIdx0) {
            cont = 0.5f*lastGain;
        } else {
            cont = 0.0f;
        }

        float thresh = std::max(0.3, 0.7*g0-cont);

        /* Bias against very high pitch (very short period) to avoid false-positives
         * due to short-term correlation.
         * NOTE(review): the `else if` branch is unreachable - any value
         * below 2*minPeriod is also below 3*minPeriod, so the 0.9 threshold
         * never applies. This mirrors the upstream CELT code; confirm
         * whether independent `if`s were intended. */
        if (pitchIdx1 < 3*minPeriod) {
            thresh = std::max(0.4, 0.85*g0-cont);
        } else if (pitchIdx1 < 2*minPeriod) {
            thresh = std::max(0.5, 0.9*g0-cont);
        }
        if (g1 > thresh) {
            bestXy = xy;
            bestYy = yy;
            pitchIdx = pitchIdx1;
            g = g1;
        }
    }

    /* Final gain estimate for the winning period. */
    bestXy = std::max(0.0f, bestXy);
    float pg;
    if (bestYy <= bestXy) {
        pg = 1.0;
    } else {
        pg = bestXy/(bestYy+1);
    }

    /* Correlations at period-1, period, period+1 for sub-lag refinement. */
    std::array<float, 3> xCorr {0};
    for ( size_t k = 0; k < 3; ++k ) {
        for ( size_t i = xStart; i < xStart+frameSize; ++i) {
            xCorr[k] += (pitchBuf[i] * pitchBuf[i-(pitchIdx+k-1)]);
        }
    }

    /* NOTE(review): `offset` is size_t, so `offset = -1` wraps to SIZE_MAX;
     * the later `2*pitchIdx + offset` still yields 2*pitchIdx - 1 through
     * modular arithmetic, but a signed type would state the intent. */
    size_t offset;
    if ((xCorr[2]-xCorr[0]) > 0.7*(xCorr[1]-xCorr[0])) {
        offset = 1;
    } else if ((xCorr[0]-xCorr[2]) > 0.7*(xCorr[1]-xCorr[2])) {
        offset = -1;
    } else {
        offset = 0;
    }

    if (pg > g) {
        pg = g;
    }

    /* Back to the caller's (non-decimated) period scale. */
    pitchIdx0_ = 2*pitchIdx + offset;

    if (pitchIdx0_ < minPeriod0) {
        pitchIdx0_ = minPeriod0;
    }

    this->m_lastPeriod = pitchIdx0_;
    this->m_lastGain = pg;

    return this->m_lastPeriod;
}
+
+float RNNoiseProcess::ComputePitchGain(float xy, float xx, float yy)
+{
+ return xy / math::MathUtils::SqrtF32(1+xx*yy);
+}
+
+void RNNoiseProcess::AutoCorr(
+ const vec1D32F& x,
+ vec1D32F& ac,
+ size_t lag,
+ size_t n)
+{
+ if (n < lag) {
+ printf_err("Invalid parameters for AutoCorr\n");
+ return;
+ }
+
+ auto fastN = n - lag;
+
+ /* Auto-correlation - can be done by PlatformMath functions */
+ this->PitchXCorr(x, x, ac, fastN, lag + 1);
+
+ /* Modify auto-correlation by summing with auto-correlation for different lags. */
+ for (size_t k = 0; k < lag + 1; k++) {
+ float d = 0;
+ for (size_t i = k + fastN; i < n; i++) {
+ d += x[i] * x[i - k];
+ }
+ ac[k] += d;
+ }
+}
+
+
+void RNNoiseProcess::PitchXCorr(
+ const vec1D32F& x,
+ const vec1D32F& y,
+ vec1D32F& ac,
+ size_t len,
+ size_t maxPitch)
+{
+ for (size_t i = 0; i < maxPitch; i++) {
+ float sum = 0;
+ for (size_t j = 0; j < len; j++) {
+ sum += x[j] * y[i + j];
+ }
+ ac[i] = sum;
+ }
+}
+
+/* Linear predictor coefficients */
+void RNNoiseProcess::LPC(
+ const vec1D32F& ac,
+ int32_t p,
+ vec1D32F& lpc)
+{
+ auto error = ac[0];
+
+ if (error != 0) {
+ for (int i = 0; i < p; i++) {
+
+ /* Sum up this iteration's reflection coefficient */
+ float rr = 0;
+ for (int j = 0; j < i; j++) {
+ rr += lpc[j] * ac[i - j];
+ }
+
+ rr += ac[i + 1];
+ auto r = -rr / error;
+
+ /* Update LP coefficients and total error */
+ lpc[i] = r;
+ for (int j = 0; j < ((i + 1) >> 1); j++) {
+ auto tmp1 = lpc[j];
+ auto tmp2 = lpc[i - 1 - j];
+ lpc[j] = tmp1 + (r * tmp2);
+ lpc[i - 1 - j] = tmp2 + (r * tmp1);
+ }
+
+ error = error - (r * r * error);
+
+ /* Bail out once we get 30dB gain */
+ if (error < (0.001 * ac[0])) {
+ break;
+ }
+ }
+ }
+}
+
+void RNNoiseProcess::Fir5(
+ const vec1D32F &num,
+ uint32_t N,
+ vec1D32F &x)
+{
+ auto num0 = num[0];
+ auto num1 = num[1];
+ auto num2 = num[2];
+ auto num3 = num[3];
+ auto num4 = num[4];
+ auto mem0 = 0;
+ auto mem1 = 0;
+ auto mem2 = 0;
+ auto mem3 = 0;
+ auto mem4 = 0;
+ for (uint32_t i = 0; i < N; i++)
+ {
+ auto sum_ = x[i] + (num0 * mem0) + (num1 * mem1) +
+ (num2 * mem2) + (num3 * mem3) + (num4 * mem4);
+ mem4 = mem3;
+ mem3 = mem2;
+ mem2 = mem1;
+ mem1 = mem0;
+ mem0 = x[i];
+ x[i] = sum_;
+ }
+}
+
+void RNNoiseProcess::PitchFilter(FrameFeatures &features, vec1D32F &g) {
+ std::vector<float> r(NB_BANDS, 0);
+ std::vector<float> rf(FREQ_SIZE, 0);
+ std::vector<float> newE(NB_BANDS);
+
+ for (size_t i = 0; i < NB_BANDS; i++) {
+ if (features.m_Exp[i] > g[i]) {
+ r[i] = 1;
+ } else {
+
+
+ r[i] = std::pow(features.m_Exp[i], 2) * (1 - std::pow(g[i], 2)) /
+ (.001 + std::pow(g[i], 2) * (1 - std::pow(features.m_Exp[i], 2)));
+ }
+
+
+ r[i] = math::MathUtils::SqrtF32(std::min(1.0f, std::max(0.0f, r[i])));
+ r[i] *= math::MathUtils::SqrtF32(features.m_Ex[i] / (1e-8f + features.m_Ep[i]));
+ }
+
+ InterpBandGain(rf, r);
+ for (size_t i = 0; i < FREQ_SIZE - 1; i++) {
+ features.m_fftX[2 * i] += rf[i] * features.m_fftP[2 * i]; /* Real. */
+ features.m_fftX[2 * i + 1] += rf[i] * features.m_fftP[2 * i + 1]; /* Imaginary. */
+
+ }
+ ComputeBandEnergy(features.m_fftX, newE);
+ std::vector<float> norm(NB_BANDS);
+ std::vector<float> normf(FRAME_SIZE, 0);
+ for (size_t i = 0; i < NB_BANDS; i++) {
+ norm[i] = math::MathUtils::SqrtF32(features.m_Ex[i] / (1e-8f + newE[i]));
+ }
+
+ InterpBandGain(normf, norm);
+ for (size_t i = 0; i < FREQ_SIZE - 1; i++) {
+ features.m_fftX[2 * i] *= normf[i]; /* Real. */
+ features.m_fftX[2 * i + 1] *= normf[i]; /* Imaginary. */
+
+ }
+}
+
+void RNNoiseProcess::FrameSynthesis(vec1D32F& outFrame, vec1D32F& fftY) {
+ std::vector<float> x(WINDOW_SIZE, 0);
+ InverseTransform(x, fftY);
+ ApplyWindow(x);
+ for (size_t i = 0; i < FRAME_SIZE; i++) {
+ outFrame[i] = x[i] + m_synthesisMem[i];
+ }
+ memcpy((m_synthesisMem.data()), &x[FRAME_SIZE], FRAME_SIZE*sizeof(float));
+}
+
+void RNNoiseProcess::InterpBandGain(vec1D32F& g, vec1D32F& bandE) {
+ for (size_t i = 0; i < NB_BANDS - 1; i++) {
+ int bandSize = (m_eband5ms[i + 1] - m_eband5ms[i]) << FRAME_SIZE_SHIFT;
+ for (int j = 0; j < bandSize; j++) {
+ float frac = static_cast<float>(j) / bandSize;
+ g[(m_eband5ms[i] << FRAME_SIZE_SHIFT) + j] = (1 - frac) * bandE[i] + frac * bandE[i + 1];
+ }
+ }
+}
+
+void RNNoiseProcess::InverseTransform(vec1D32F& out, vec1D32F& fftXIn) {
+
+ std::vector<float> x(WINDOW_SIZE * 2); /* This is complex. */
+ vec1D32F newFFT; /* This is complex. */
+
+ size_t i;
+ for (i = 0; i < FREQ_SIZE * 2; i++) {
+ x[i] = fftXIn[i];
+ }
+ for (i = FREQ_SIZE; i < WINDOW_SIZE; i++) {
+ x[2 * i] = x[2 * (WINDOW_SIZE - i)]; /* Real. */
+ x[2 * i + 1] = -x[2 * (WINDOW_SIZE - i) + 1]; /* Imaginary. */
+ }
+
+ constexpr uint32_t numFFt = 2 * FRAME_SIZE;
+ static_assert(numFFt != 0);
+
+ vec1D32F fftOut = vec1D32F(x.size(), 0);
+ math::MathUtils::FftF32(x,fftOut, m_fftInstCmplx);
+
+ /* Normalize. */
+ for (auto &f: fftOut) {
+ f /= numFFt;
+ }
+
+ out[0] = WINDOW_SIZE * fftOut[0]; /* Real. */
+ for (i = 1; i < WINDOW_SIZE; i++) {
+ out[i] = WINDOW_SIZE * fftOut[(WINDOW_SIZE * 2) - (2 * i)]; /* Real. */
+ }
+}
+
+
+} /* namespace rnn */
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/noise_reduction/src/UseCaseHandler.cc b/source/use_case/noise_reduction/src/UseCaseHandler.cc
new file mode 100644
index 0000000..12579df
--- /dev/null
+++ b/source/use_case/noise_reduction/src/UseCaseHandler.cc
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cmath>
+#include <algorithm>
+
+#include "UseCaseHandler.hpp"
+#include "hal.h"
+#include "UseCaseCommonUtils.hpp"
+#include "AudioUtils.hpp"
+#include "InputFiles.hpp"
+#include "RNNoiseModel.hpp"
+#include "RNNoiseProcess.hpp"
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief Helper function to increment current audio clip features index.
+ * @param[in,out] ctx Pointer to the application context object.
+ **/
+ static void IncrementAppCtxClipIdx(ApplicationContext& ctx);
+
+ /**
+ * @brief Quantize the given features and populate the input Tensor.
+ * @param[in] inputFeatures Vector of floating point features to quantize.
+ * @param[in] quantScale Quantization scale for the inputTensor.
+ * @param[in] quantOffset Quantization offset for the inputTensor.
+ * @param[in,out] inputTensor TFLite micro tensor to populate.
+ **/
+ static void QuantizeAndPopulateInput(rnn::vec1D32F& inputFeatures,
+ float quantScale, int quantOffset,
+ TfLiteTensor* inputTensor);
+
    /* Noise reduction inference handler.
     *
     * Iterates over the selected audio clip(s), runs the RNNoise model on each
     * windowed frame, and optionally dumps the denoised audio (with per-clip
     * headers and a footer) into a caller-provided memory region.
     * Returns false on any setup or inference failure. */
    bool NoiseReductionHandler(ApplicationContext& ctx, bool runAll)
    {
        /* Screen coordinates for the "Running inference..." text. */
        constexpr uint32_t dataPsnTxtInfStartX = 20;
        constexpr uint32_t dataPsnTxtInfStartY = 40;

        /* Variables used for memory dumping; the byte counter falls back to a
         * local variable when the context does not supply one. */
        size_t memDumpMaxLen = 0;
        uint8_t* memDumpBaseAddr = nullptr;
        size_t undefMemDumpBytesWritten = 0;
        size_t *pMemDumpBytesWritten = &undefMemDumpBytesWritten;
        if (ctx.Has("MEM_DUMP_LEN") && ctx.Has("MEM_DUMP_BASE_ADDR") && ctx.Has("MEM_DUMP_BYTE_WRITTEN")) {
            memDumpMaxLen = ctx.Get<size_t>("MEM_DUMP_LEN");
            memDumpBaseAddr = ctx.Get<uint8_t*>("MEM_DUMP_BASE_ADDR");
            pMemDumpBytesWritten = ctx.Get<size_t*>("MEM_DUMP_BYTE_WRITTEN");
        }
        std::reference_wrapper<size_t> memDumpBytesWritten = std::ref(*pMemDumpBytesWritten);

        auto& platform = ctx.Get<hal_platform&>("platform");
        platform.data_psn->clear(COLOR_BLACK);

        auto& profiler = ctx.Get<Profiler&>("profiler");

        /* Get model reference. */
        auto& model = ctx.Get<RNNoiseModel&>("model");
        if (!model.IsInited()) {
            printf_err("Model is not initialised! Terminating processing.\n");
            return false;
        }

        /* Populate Pre-Processing related parameters. */
        auto audioParamsWinLen = ctx.Get<uint32_t>("frameLength");
        auto audioParamsWinStride = ctx.Get<uint32_t>("frameStride");
        auto nrNumInputFeatures = ctx.Get<uint32_t>("numInputFeatures");

        TfLiteTensor* inputTensor = model.GetInputTensor(0);
        /* NOTE(review): comparing a feature *count* against a *byte* count
         * assumes one byte per feature (an int8 input tensor) - confirm. */
        if (nrNumInputFeatures != inputTensor->bytes) {
            printf_err("Input features size must be equal to input tensor size."
                       " Feature size = %" PRIu32 ", Tensor size = %zu.\n",
                       nrNumInputFeatures, inputTensor->bytes);
            return false;
        }

        TfLiteTensor* outputTensor = model.GetOutputTensor(model.m_indexForModelOutput);

        /* Initial choice of index for WAV file. */
        auto startClipIdx = ctx.Get<uint32_t>("clipIndex");

        /* Clip accessors default to the baked-in audio arrays but can be
         * overridden through the context (e.g. to inject test features). */
        std::function<const int16_t* (const uint32_t)> audioAccessorFunc = get_audio_array;
        if (ctx.Has("features")) {
            audioAccessorFunc = ctx.Get<std::function<const int16_t* (const uint32_t)>>("features");
        }
        std::function<uint32_t (const uint32_t)> audioSizeAccessorFunc = get_audio_array_size;
        if (ctx.Has("featureSizes")) {
            audioSizeAccessorFunc = ctx.Get<std::function<uint32_t (const uint32_t)>>("featureSizes");
        }
        std::function<const char*(const uint32_t)> audioFileAccessorFunc = get_filename;
        if (ctx.Has("featureFileNames")) {
            audioFileAccessorFunc = ctx.Get<std::function<const char*(const uint32_t)>>("featureFileNames");
        }
        do{
            auto startDumpAddress = memDumpBaseAddr + memDumpBytesWritten;
            auto currentIndex = ctx.Get<uint32_t>("clipIndex");

            /* Creating a sliding window through the audio. */
            auto audioDataSlider = audio::SlidingWindow<const int16_t>(
                    audioAccessorFunc(currentIndex),
                    audioSizeAccessorFunc(currentIndex), audioParamsWinLen,
                    audioParamsWinStride);

            info("Running inference on input feature map %" PRIu32 " => %s\n", currentIndex,
                 audioFileAccessorFunc(currentIndex));

            /* Write the clip header first so the dump is self-describing. */
            memDumpBytesWritten += DumpDenoisedAudioHeader(audioFileAccessorFunc(currentIndex),
                    (audioDataSlider.TotalStrides() + 1) * audioParamsWinLen,
                    memDumpBaseAddr + memDumpBytesWritten,
                    memDumpMaxLen - memDumpBytesWritten);

            rnn::RNNoiseProcess featureProcessor = rnn::RNNoiseProcess();
            rnn::vec1D32F audioFrame(audioParamsWinLen);
            rnn::vec1D32F inputFeatures(nrNumInputFeatures);
            rnn::vec1D32F denoisedAudioFrameFloat(audioParamsWinLen);
            std::vector<int16_t> denoisedAudioFrame(audioParamsWinLen);

            /* NOTE(review): sized by bytes; assumes an int8 output tensor. */
            std::vector<float> modelOutputFloat(outputTensor->bytes);
            rnn::FrameFeatures frameFeatures;
            bool resetGRU = true;

            while (audioDataSlider.HasNext()) {
                const int16_t* inferenceWindow = audioDataSlider.Next();
                audioFrame = rnn::vec1D32F(inferenceWindow, inferenceWindow+audioParamsWinLen);

                featureProcessor.PreprocessFrame(audioFrame.data(), audioParamsWinLen, frameFeatures);

                /* Reset or copy over GRU states first to avoid TFLu memory overlap issues. */
                if (resetGRU){
                    model.ResetGruState();
                } else {
                    /* Copying gru state outputs to gru state inputs.
                     * Call ResetGruState in between the sequence of inferences on unrelated input data. */
                    model.CopyGruStates();
                }

                QuantizeAndPopulateInput(frameFeatures.m_featuresVec,
                        inputTensor->params.scale, inputTensor->params.zero_point,
                        inputTensor);

                /* Strings for presentation/logging. */
                std::string str_inf{"Running inference... "};

                /* Display message on the LCD - inference running. */
                platform.data_psn->present_data_text(
                        str_inf.c_str(), str_inf.size(),
                        dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

                info("Inference %zu/%zu\n", audioDataSlider.Index() + 1, audioDataSlider.TotalStrides() + 1);

                /* Run inference over this feature sliding window. */
                profiler.StartProfiling("Inference");
                bool success = model.RunInference();
                profiler.StopProfiling();
                /* From now on the GRU state carries over between frames. */
                resetGRU = false;

                if (!success) {
                    return false;
                }

                /* De-quantize main model output ready for post-processing. */
                const auto* outputData = tflite::GetTensorData<int8_t>(outputTensor);
                auto outputQuantParams = arm::app::GetTensorQuantParams(outputTensor);

                for (size_t i = 0; i < outputTensor->bytes; ++i) {
                    modelOutputFloat[i] = (static_cast<float>(outputData[i]) - outputQuantParams.offset)
                                          * outputQuantParams.scale;
                }

                /* Round and cast the post-processed results for dumping to wav. */
                featureProcessor.PostProcessFrame(modelOutputFloat, frameFeatures, denoisedAudioFrameFloat);
                for (size_t i = 0; i < audioParamsWinLen; ++i) {
                    denoisedAudioFrame[i] = static_cast<int16_t>(std::roundf(denoisedAudioFrameFloat[i]));
                }

                /* Erase. */
                str_inf = std::string(str_inf.size(), ' ');
                platform.data_psn->present_data_text(
                        str_inf.c_str(), str_inf.size(),
                        dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

                if (memDumpMaxLen > 0) {
                    /* Dump output tensors to memory. */
                    memDumpBytesWritten += DumpOutputDenoisedAudioFrame(
                            denoisedAudioFrame,
                            memDumpBaseAddr + memDumpBytesWritten,
                            memDumpMaxLen - memDumpBytesWritten);
                }
            }

            if (memDumpMaxLen > 0) {
                /* Needed to not let the compiler complain about type mismatch. */
                size_t valMemDumpBytesWritten = memDumpBytesWritten;
                info("Output memory dump of %zu bytes written at address 0x%p\n",
                     valMemDumpBytesWritten, startDumpAddress);
            }

            /* Mark the end of this clip's data in the dump. */
            DumpDenoisedAudioFooter(memDumpBaseAddr + memDumpBytesWritten, memDumpMaxLen - memDumpBytesWritten);

            info("Final results:\n");
            profiler.PrintProfilingResult();
            IncrementAppCtxClipIdx(ctx);

        } while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);

        return true;
    }
+
+    size_t DumpDenoisedAudioHeader(const char* filename, size_t dumpSize,
+                                   uint8_t* memAddress, size_t memSize)
+    {
+        /* Write a small header in front of the denoised audio dump:
+         *   [int32 filename length][filename chars][int32 payload size in bytes]
+         * @param[in]   filename    Name to embed in the header.
+         * @param[in]   dumpSize    Number of int16 samples that will follow.
+         * @param[in]   memAddress  Destination buffer start.
+         * @param[in]   memSize     Capacity of the destination buffer in bytes.
+         * @return      Number of header bytes actually written (0 on null dest). */
+        if (memAddress == nullptr) {
+            return 0;
+        }
+
+        const int32_t filenameLength = static_cast<int32_t>(strlen(filename));
+        const int32_t dumpSizeByte   = static_cast<int32_t>(dumpSize * sizeof(int16_t));
+        size_t numBytesWritten = 0;
+        bool overflow = false;
+
+        /* Writes one field if it fits; flags overflow otherwise.
+         * NOTE: the original code compared `memSize - n > 0` on unsigned
+         * operands, which wraps around when n > memSize (guard never fires,
+         * memcpy overruns the buffer) and also rejected an exact fit. It
+         * additionally subtracted the cumulative numBytesWritten from
+         * memSize after each field, over-counting the space consumed.
+         * Both defects are fixed below. */
+        auto writeField = [&](const void* src, size_t numBytesToWrite) {
+            if (!overflow && memSize >= numBytesToWrite) {
+                std::memcpy(memAddress + numBytesWritten, src, numBytesToWrite);
+                numBytesWritten += numBytesToWrite;
+                memSize -= numBytesToWrite;
+            } else {
+                overflow = true;
+            }
+        };
+
+        /* Write the filename length. */
+        writeField(&filenameLength, sizeof(filenameLength));
+
+        /* Write the file name. */
+        writeField(filename, static_cast<size_t>(filenameLength));
+
+        /* Write dumpSize in bytes. */
+        writeField(&dumpSizeByte, sizeof(dumpSizeByte));
+
+        if (false == overflow) {
+            info("Audio Clip dump header info (%zu bytes) written to %p\n", numBytesWritten, memAddress);
+        } else {
+            printf_err("Not enough memory to dump Audio Clip header.\n");
+        }
+
+        return numBytesWritten;
+    }
+
+    size_t DumpDenoisedAudioFooter(uint8_t *memAddress, size_t memSize){
+        /* Terminate the audio dump with a 4-byte end-of-file marker (-1).
+         * Returns the number of bytes written, or 0 when the destination
+         * is null or too small to hold the marker. */
+        constexpr int32_t eofMarker = -1;
+        const bool canWrite = (memAddress != nullptr) && (memSize >= sizeof(eofMarker));
+        if (!canWrite) {
+            return 0;
+        }
+        std::memcpy(memAddress, &eofMarker, sizeof(eofMarker));
+        return sizeof(eofMarker);
+    }
+
+    size_t DumpOutputDenoisedAudioFrame(const std::vector<int16_t> &audioFrame,
+                                        uint8_t *memAddress, size_t memSize)
+    {
+        /* Copy one denoised int16 audio frame into the dump buffer,
+         * truncating to the space available.
+         * @param[in]   audioFrame  Samples to write.
+         * @param[in]   memAddress  Destination buffer start.
+         * @param[in]   memSize     Capacity of the destination in bytes.
+         * @return      Number of bytes actually written (0 on null dest). */
+        if (memAddress == nullptr) {
+            return 0;
+        }
+
+        size_t numByteToBeWritten = audioFrame.size() * sizeof(int16_t);
+        if (numByteToBeWritten > memSize) {
+            /* Fix: both arguments are size_t, so the format specifiers must be
+             * %zu; the original %d is undefined behavior on LP64 targets. */
+            printf_err("Overflow error: Writing %zu of %zu bytes to memory @ 0x%p.\n",
+                       memSize, numByteToBeWritten, memAddress);
+            numByteToBeWritten = memSize;
+        }
+
+        std::memcpy(memAddress, audioFrame.data(), numByteToBeWritten);
+        info("Copied %zu bytes to %p\n", numByteToBeWritten, memAddress);
+
+        return numByteToBeWritten;
+    }
+
+    size_t DumpOutputTensorsToMemory(Model& model, uint8_t* memAddress, const size_t memSize)
+    {
+        /* Copy every model output tensor, in order, into the supplied buffer,
+         * stopping as soon as the next tensor would exceed memSize.
+         * @param[in]   model       Model whose output tensors are dumped.
+         * @param[in]   memAddress  Destination buffer start.
+         * @param[in]   memSize     Capacity of the destination in bytes.
+         * @return      Total number of bytes copied. */
+        const size_t numOutputs = model.GetNumOutputs();
+        size_t totalBytes = 0;
+
+        /* Iterate over all output tensors. */
+        for (size_t idx = 0; idx < numOutputs; ++idx) {
+            const TfLiteTensor* tensor = model.GetOutputTensor(idx);
+            const auto* tensorData = tflite::GetTensorData<uint8_t>(tensor);
+#if VERIFY_TEST_OUTPUT
+            arm::app::DumpTensor(tensor);
+#endif /* VERIFY_TEST_OUTPUT */
+            /* Ensure that we don't overflow the allowed limit. */
+            if (totalBytes + tensor->bytes > memSize) {
+                printf_err("Error writing tensor %zu to memory @ 0x%p\n",
+                           idx, memAddress);
+                break;
+            }
+            if (tensor->bytes > 0) {
+                std::memcpy(memAddress + totalBytes, tensorData, tensor->bytes);
+                info("Copied %zu bytes for tensor %zu to 0x%p\n",
+                     tensor->bytes, idx, memAddress + totalBytes);
+                totalBytes += tensor->bytes;
+            }
+        }
+
+        info("%zu bytes written to memory @ 0x%p\n", totalBytes, memAddress);
+
+        return totalBytes;
+    }
+
+    static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
+    {
+        /* Advance the clip index stored in the application context,
+         * wrapping back to zero after the last available file. */
+        const auto clipIdx = ctx.Get<uint32_t>("clipIndex");
+        const uint32_t nextIdx = (clipIdx + 1 >= NUMBER_OF_FILES) ? 0 : (clipIdx + 1);
+        ctx.Set<uint32_t>("clipIndex", nextIdx);
+    }
+
+    void QuantizeAndPopulateInput(rnn::vec1D32F& inputFeatures,
+        const float quantScale, const int quantOffset, TfLiteTensor* inputTensor)
+    {
+        /* Quantize the float feature vector into the model's int8 input
+         * tensor, saturating each value to the int8 representable range.
+         * @param[in]   inputFeatures  Features to quantize.
+         * @param[in]   quantScale     Input tensor quantization scale.
+         * @param[in]   quantOffset    Input tensor quantization offset.
+         * @param[out]  inputTensor    Tensor to populate. */
+        constexpr float lowerBound = std::numeric_limits<int8_t>::min();
+        constexpr float upperBound = std::numeric_limits<int8_t>::max();
+
+        auto* tensorData = tflite::GetTensorData<int8_t>(inputTensor);
+
+        size_t idx = 0;
+        for (const float feature : inputFeatures) {
+            const float quantized = (feature / quantScale) + quantOffset;
+            const float saturated = std::max(lowerBound, std::min(quantized, upperBound));
+            tensorData[idx++] = static_cast<int8_t>(saturated);
+        }
+    }
+
+
+} /* namespace app */
+} /* namespace arm */
diff --git a/source/use_case/noise_reduction/usecase.cmake b/source/use_case/noise_reduction/usecase.cmake
new file mode 100644
index 0000000..14cff17
--- /dev/null
+++ b/source/use_case/noise_reduction/usecase.cmake
@@ -0,0 +1,110 @@
+#----------------------------------------------------------------------------
+# Copyright (c) 2021 Arm Limited. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#----------------------------------------------------------------------------
+
+# Size of the tensor arena ("activation buffer") made available to the model.
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+    0x00200000
+    STRING)
+
+# Pick the Vela-optimised model when the Ethos-U NPU is enabled,
+# otherwise fall back to the plain INT8 tflite model.
+if (ETHOS_U_NPU_ENABLED)
+    set(DEFAULT_MODEL_PATH      ${DEFAULT_MODEL_DIR}/rnnoise_INT8_vela_${DEFAULT_NPU_CONFIG_ID}.tflite)
+else()
+    set(DEFAULT_MODEL_PATH      ${DEFAULT_MODEL_DIR}/rnnoise_INT8.tflite)
+endif()
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
+    ${DEFAULT_MODEL_PATH}
+    FILEPATH)
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom WAV input files, or path to a single WAV file, to use in the evaluation application."
+    ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+    PATH_OR_FILE)
+
+# Audio pre-processing options forwarded to the wav-to-C generator below.
+USER_OPTION(${use_case}_AUDIO_RATE "Specify the target sampling rate. Default is 48000."
+    48000
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MONO "Specify if the audio needs to be converted to mono. Default is ON."
+    ON
+    BOOL)
+
+USER_OPTION(${use_case}_AUDIO_OFFSET "Specify the offset to start reading after this time (in seconds). Default is 0."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_DURATION "Specify the audio duration to load (in seconds). If set to 0 the entire audio will be processed."
+    0
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_RES_TYPE "Specify re-sampling algorithm to use. By default is 'kaiser_best'."
+    kaiser_best
+    STRING)
+
+USER_OPTION(${use_case}_AUDIO_MIN_SAMPLES "Specify the minimum number of samples to use. Default is 480, if the audio is shorter it will be automatically padded."
+    480
+    STRING)
+
+# Generate input files from audio wav files
+generate_audio_code(${${use_case}_FILE_PATH} ${SRC_GEN_DIR} ${INC_GEN_DIR}
+    ${${use_case}_AUDIO_RATE}
+    ${${use_case}_AUDIO_MONO}
+    ${${use_case}_AUDIO_OFFSET}
+    ${${use_case}_AUDIO_DURATION}
+    ${${use_case}_AUDIO_RES_TYPE}
+    ${${use_case}_AUDIO_MIN_SAMPLES})
+
+
+# Extra declarations emitted alongside the generated model source.
+# NOTE(review): these expressions carry no trailing ';' — presumably the
+# tflite code generator appends it; verify against generate_tflite_code.
+set(EXTRA_MODEL_CODE
+    "/* Model parameters for ${use_case} */"
+    "extern const int   g_FrameLength = 480"
+    "extern const int   g_FrameStride = 480"
+    "extern const uint32_t g_NumInputFeatures = 42*1" # Single time-step input of 42 features.
+    )
+
+# Generate model file.
+generate_tflite_code(
+    MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+    DESTINATION ${SRC_GEN_DIR}
+    EXPRESSIONS ${EXTRA_MODEL_CODE}
+)
+
+
+# For MPS3, allow dumping of output data to memory, based on these parameters:
+if (TARGET_PLATFORM STREQUAL mps3)
+    USER_OPTION(${use_case}_MEM_DUMP_BASE_ADDR
+        "Inference output dump address for ${use_case}"
+        0x80000000 # DDR bank 2
+        STRING)
+
+    USER_OPTION(${use_case}_MEM_DUMP_LEN
+        "Inference output dump buffer size for ${use_case}"
+        0x00100000 # 1 MiB
+        STRING)
+
+    # Add special compile definitions for this use case files:
+    set(${use_case}_COMPILE_DEFS
+        "MEM_DUMP_BASE_ADDR=${${use_case}_MEM_DUMP_BASE_ADDR}"
+        "MEM_DUMP_LEN=${${use_case}_MEM_DUMP_LEN}")
+
+    # Scope the MEM_DUMP_* macros to this use case's sources only.
+    file(GLOB_RECURSE SRC_FILES
+        "${SRC_USE_CASE}/${use_case}/src/*.cpp"
+        "${SRC_USE_CASE}/${use_case}/src/*.cc")
+
+    set_source_files_properties(
+        ${SRC_FILES}
+        PROPERTIES COMPILE_DEFINITIONS
+        "${${use_case}_COMPILE_DEFS}")
+endif()