author     Richard Burton <richard.burton@arm.com>    2021-11-10 16:27:14 +0000
committer  Richard <richard.burton@arm.com>           2021-11-10 16:34:16 +0000
commit     005534664e192cf909a11435c4bc4696b1f4c51f (patch)
tree       f8314bd284561e1f0ff68fc393ee22d0318ae162 /tests
parent     dee53bc7769d6201ec27deea4405c0df6c9b0623 (diff)
download   ml-embedded-evaluation-kit-005534664e192cf909a11435c4bc4696b1f4c51f.tar.gz
MLECO-2354 MLECO-2355 MLECO-2356: Moving noise reduction to public repository
* Use RNNoise model from PMZ
* Add Noise reduction use-case

Signed-off-by: Richard Burton <richard.burton@arm.com>
Change-Id: Ia8cc7ef102e22a5ff8bfbd3833594a4905a66057
Diffstat (limited to 'tests')
-rw-r--r--  tests/use_case/ad/InferenceTestAD.cc                        |   5
-rw-r--r--  tests/use_case/asr/InferenceTestWav2Letter.cc               |   5
-rw-r--r--  tests/use_case/img_class/InferenceTestMobilenetV2.cc        |  10
-rw-r--r--  tests/use_case/kws/InferenceTestDSCNN.cc                    |   9
-rw-r--r--  tests/use_case/kws_asr/InferenceTestDSCNN.cc                |   9
-rw-r--r--  tests/use_case/kws_asr/InferenceTestWav2Letter.cc           |   5
-rw-r--r--  tests/use_case/noise_reduction/InferenceTestRNNoise.cc      | 133
-rw-r--r--  tests/use_case/noise_reduction/NoiseReductionTests.cc       |  18
-rw-r--r--  tests/use_case/noise_reduction/RNNNoiseUCTests.cc           | 206
-rw-r--r--  tests/use_case/noise_reduction/RNNUCTestCaseData.hpp        | 180
-rw-r--r--  tests/use_case/noise_reduction/RNNoiseModelTests.cc         | 166
-rw-r--r--  tests/use_case/noise_reduction/RNNoiseProcessingTests.cpp   | 245
-rw-r--r--  tests/use_case/vww/InferenceVisualWakeWordModelTests.cc     |   6
13 files changed, 975 insertions, 22 deletions
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
index ad785e8..2933fbe 100644
--- a/tests/use_case/ad/InferenceTestAD.cc
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -69,7 +69,7 @@ void TestInference(const T *input_goldenFV, const T *output_goldenFV, arm::app::
TfLiteTensor *outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
@@ -92,7 +92,8 @@ TEST_CASE("Running random inference with TensorFlow Lite Micro and AdModel Int8"
TEST_CASE("Running golden vector inference with TensorFlow Lite Micro and AdModel Int8", "[AD]")
{
- for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
auto input_goldenFV = get_ifm_data_array(i);;
auto output_goldenFV = get_ofm_data_array(i);
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
index 1f9cb80..3e30bd2 100644
--- a/tests/use_case/asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -76,7 +76,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
@@ -87,7 +87,8 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
{
- for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
auto input_goldenFV = get_ifm_data_array(i);;
auto output_goldenFV = get_ofm_data_array(i);
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index bb89c99..07bd78f 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -29,9 +29,9 @@ bool RunInference(arm::app::Model& model, const int8_t imageData[])
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
inputTensor->bytes :
- IFM_DATA_SIZE;
+ IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, imageData, copySz);
if(model.IsDataSigned()){
@@ -51,7 +51,7 @@ void TestInference(int imageIdx, arm::app::Model& model, T tolerance) {
TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
@@ -71,12 +71,12 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "
REQUIRE(model.Init());
REQUIRE(model.IsInited());
- for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
TestInference<uint8_t>(i, model, 1);
}
}
- for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
DYNAMIC_SECTION("Executing inference with re-init")
{
arm::app::MobileNetModel model{};
diff --git a/tests/use_case/kws/InferenceTestDSCNN.cc b/tests/use_case/kws/InferenceTestDSCNN.cc
index 7ce55dd..8918073 100644
--- a/tests/use_case/kws/InferenceTestDSCNN.cc
+++ b/tests/use_case/kws/InferenceTestDSCNN.cc
@@ -29,9 +29,9 @@ bool RunInference(arm::app::Model& model, const int8_t vec[])
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
inputTensor->bytes :
- IFM_DATA_SIZE;
+ IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, vec, copySz);
return model.RunInference();
@@ -65,7 +65,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
@@ -87,7 +87,8 @@ TEST_CASE("Running random inference with TensorFlow Lite Micro and DsCnnModel In
TEST_CASE("Running inference with TensorFlow Lite Micro and DsCnnModel Uint8", "[DS_CNN]")
{
- for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
const int8_t* input_goldenFV = get_ifm_data_array(i);;
const int8_t* output_goldenFV = get_ofm_data_array(i);
diff --git a/tests/use_case/kws_asr/InferenceTestDSCNN.cc b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
index 134003d..ad1731b 100644
--- a/tests/use_case/kws_asr/InferenceTestDSCNN.cc
+++ b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
@@ -29,9 +29,9 @@ bool RunInference(arm::app::Model& model, const int8_t vec[]) {
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
inputTensor->bytes :
- IFM_DATA_SIZE;
+ IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, vec, copySz);
return model.RunInference();
@@ -63,7 +63,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
@@ -83,7 +83,8 @@ TEST_CASE("Running random inference with Tflu and DsCnnModel Int8", "[DS_CNN]")
}
TEST_CASE("Running inference with Tflu and DsCnnModel Uint8", "[DS_CNN]") {
- for (uint32_t i = 0; i < NUMBER_OF_FM_FILES; ++i) {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
const int8_t* input_goldenFV = get_ifm_data_array(i);
const int8_t* output_goldenFV = get_ofm_data_array(i);
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 1b14a42..477a1dd 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -78,7 +78,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
@@ -89,7 +89,8 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
{
- for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
auto input_goldenFV = get_ifm_data_array(i);;
auto output_goldenFV = get_ofm_data_array(i);
diff --git a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
new file mode 100644
index 0000000..f32a460
--- /dev/null
+++ b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "RNNoiseModel.hpp"
+#include "TestData_noise_reduction.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+namespace test {
+namespace rnnoise {
+
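+ /* Copies each supplied input vector into the matching model input tensor, then runs a single inference. */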
+ bool RunInference(arm::app::Model& model, const std::vector<std::vector<int8_t>> inData)
+ {
+ for (size_t i = 0; i < model.GetNumInputs(); ++i) {
+ TfLiteTensor* inputTensor = model.GetInputTensor(i);
+ REQUIRE(inputTensor);
+ memcpy(inputTensor->data.data, inData[i].data(), inData[i].size());
+ }
+
+ return model.RunInference();
+ }
+
+ bool RunInferenceRandom(arm::app::Model& model)
+ {
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist {-128, 127};
+
+ auto gen = [&dist, &mersenneGen](){
+ return dist(mersenneGen);
+ };
+
+ std::vector<std::vector<int8_t>> randomInput{NUMBER_OF_IFM_FILES};
+ for (size_t i = 0; i < model.GetNumInputs(); ++i) {
+ TfLiteTensor *inputTensor = model.GetInputTensor(i);
+ REQUIRE(inputTensor);
+ randomInput[i].resize(inputTensor->bytes);
+ std::generate(std::begin(randomInput[i]), std::end(randomInput[i]), gen);
+ }
+
+ REQUIRE(RunInference(model, randomInput));
+ return true;
+ }
+
+ TEST_CASE("Running random inference with Tflu and RNNoise Int8", "[RNNoise]")
+ {
+ arm::app::RNNoiseModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ REQUIRE(RunInferenceRandom(model));
+ }
+
+ template<typename T>
+ void TestInference(const std::vector<std::vector<T>> input_goldenFV, const std::vector<std::vector<T>> output_goldenFV, arm::app::Model& model)
+ {
+ for (size_t i = 0; i < model.GetNumInputs(); ++i) {
+ TfLiteTensor* inputTensor = model.GetInputTensor(i);
+ REQUIRE(inputTensor);
+ }
+
+ REQUIRE(RunInference(model, input_goldenFV));
+
+ for (size_t i = 0; i < model.GetNumOutputs(); ++i) {
+ TfLiteTensor *outputTensor = model.GetOutputTensor(i);
+
+ REQUIRE(outputTensor);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ for (size_t j = 0; j < outputTensor->bytes; j++) {
+ REQUIRE(static_cast<int>(tensorData[j]) == static_cast<int>((output_goldenFV[i][j])));
+ }
+ }
+ }
+
+ TEST_CASE("Running inference with Tflu and RNNoise Int8", "[RNNoise]")
+ {
+ std::vector<std::vector<int8_t>> goldenInputFV {NUMBER_OF_IFM_FILES};
+ std::vector<std::vector<int8_t>> goldenOutputFV {NUMBER_OF_OFM_FILES};
+
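+ /* Golden reference sizes for each of the model's four input and five output tensors. */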
+ std::array<size_t, NUMBER_OF_IFM_FILES> inputSizes = {IFM_0_DATA_SIZE,
+ IFM_1_DATA_SIZE,
+ IFM_2_DATA_SIZE,
+ IFM_3_DATA_SIZE};
+
+ std::array<size_t, NUMBER_OF_OFM_FILES> outputSizes = {OFM_0_DATA_SIZE,
+ OFM_1_DATA_SIZE,
+ OFM_2_DATA_SIZE,
+ OFM_3_DATA_SIZE,
+ OFM_4_DATA_SIZE};
+
+ for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
+ goldenInputFV[i].resize(inputSizes[i]);
+ std::memcpy(goldenInputFV[i].data(), get_ifm_data_array(i), inputSizes[i]);
+ }
+ for (uint32_t i = 0 ; i < NUMBER_OF_OFM_FILES; ++i) {
+ goldenOutputFV[i].resize(outputSizes[i]);
+ std::memcpy(goldenOutputFV[i].data(), get_ofm_data_array(i), outputSizes[i]);
+ }
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::RNNoiseModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(goldenInputFV, goldenOutputFV, model);
+ }
+ }
+
+} /* namespace rnnoise */
+} /* namespace test */
diff --git a/tests/use_case/noise_reduction/NoiseReductionTests.cc b/tests/use_case/noise_reduction/NoiseReductionTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/noise_reduction/NoiseReductionTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
new file mode 100644
index 0000000..d57fced
--- /dev/null
+++ b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RNNoiseModel.hpp"
+#include "UseCaseHandler.hpp"
+#include "InputFiles.hpp"
+#include "RNNUCTestCaseData.hpp"
+#include "UseCaseCommonUtils.hpp"
+
+#include <catch.hpp>
+#include <hal.h>
+#include <Profiler.hpp>
+#include <iostream>
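+
+/* Convenience macros: bring up the HAL platform and populate the application context (profiler, platform, model) used by the tests below. */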
+#define PLATFORM \
+hal_platform platform; \
+data_acq_module data_acq; \
+data_psn_module data_psn; \
+platform_timer timer; \
+hal_init(&platform, &data_acq, &data_psn, &timer); \
+hal_platform_init(&platform);
+
+#define CONTEXT \
+arm::app::ApplicationContext caseContext; \
+arm::app::Profiler profiler{&platform, "noise_reduction"}; \
+caseContext.Set<arm::app::Profiler&>("profiler", profiler); \
+caseContext.Set<hal_platform&>("platform", platform); \
+caseContext.Set<arm::app::RNNoiseModel&>("model", model);
+
+TEST_CASE("Verify output tensor memory dump")
+{
+ constexpr size_t maxMemDumpSz = 0x100000; /* 1 MiB worth of space */
+ std::vector<uint8_t> memPool(maxMemDumpSz); /* Memory pool */
+ arm::app::RNNoiseModel model{};
+
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ /* Populate the output tensors */
+ const size_t numOutputs = model.GetNumOutputs();
+ size_t sizeToWrite = 0;
+ size_t lastTensorSize = model.GetOutputTensor(numOutputs - 1)->bytes;
+
+ for (size_t i = 0; i < numOutputs; ++i) {
+ TfLiteTensor* tensor = model.GetOutputTensor(i);
+ auto* tData = tflite::GetTensorData<uint8_t>(tensor);
+
+ if (tensor->bytes > 0) {
+ memset(tData, static_cast<uint8_t>(i), tensor->bytes);
+ sizeToWrite += tensor->bytes;
+ }
+ }
+
+
+ SECTION("Positive use case")
+ {
+ /* Run the memory dump */
+ auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), memPool.size());
+ REQUIRE(sizeToWrite == bytesWritten);
+
+ /* Verify the dump */
+ size_t k = 0;
+ for (size_t i = 0; i < numOutputs && k < memPool.size(); ++i) {
+ TfLiteTensor* tensor = model.GetOutputTensor(i);
+ auto* tData = tflite::GetTensorData<uint8_t>(tensor);
+
+ for (size_t j = 0; j < tensor->bytes && k < memPool.size(); ++j) {
+ REQUIRE(tData[j] == memPool[k++]);
+ }
+ }
+ }
+
+ SECTION("Limited memory - skipping last tensor")
+ {
+ /* Run the memory dump */
+ auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), sizeToWrite - 1);
+ REQUIRE(lastTensorSize > 0);
+ REQUIRE(bytesWritten == sizeToWrite - lastTensorSize);
+ }
+
+ SECTION("Zero memory")
+ {
+ /* Run the memory dump */
+ auto bytesWritten = DumpOutputTensorsToMemory(model, memPool.data(), 0);
+ REQUIRE(bytesWritten == 0);
+ }
+}
+
+TEST_CASE("Inference run all clips", "[RNNoise]")
+{
+ PLATFORM
+
+ arm::app::RNNoiseModel model;
+
+ CONTEXT
+
+ caseContext.Set<uint32_t>("clipIndex", 0);
+ caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
+ caseContext.Set<uint32_t>("frameLength", g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", g_FrameStride);
+
+ /* Load the model. */
+ REQUIRE(model.Init());
+
+ REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
+}
+
+std::function<uint32_t(const uint32_t)> get_golden_input_p232_208_array_size(const uint32_t numberOfFeatures) {
+
+ return [numberOfFeatures](const uint32_t) -> uint32_t{
+ return numberOfFeatures;
+ };
+}
+
+const char* get_test_filename(const uint32_t idx) {
+ auto name = get_filename(idx);
+ REQUIRE(std::string("p232_208.wav") == name);
+ return "p232_208.wav";
+}
+
+void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
+ PLATFORM
+
+ arm::app::RNNoiseModel model;
+
+ CONTEXT
+
+ caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", get_audio_array);
+ caseContext.Set<std::function<const char* (const uint32_t)>>("featureFileNames", get_test_filename);
+ caseContext.Set<uint32_t>("frameLength", g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", g_FrameStride);
+ caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
+ /* Load the model. */
+ REQUIRE(model.Init());
+
+ size_t oneInferenceOutSizeBytes = g_FrameLength * sizeof(int16_t);
+
+ auto infIndex = 0;
+ for (auto numInf: numberOfInferences) {
+ DYNAMIC_SECTION("Number of features: "<< numInf) {
+ caseContext.Set<uint32_t>("clipIndex", 1); /* Only getting p232_208.wav for tests. */
+ uint32_t audioSizeInput = numInf*g_FrameLength;
+ caseContext.Set<std::function<uint32_t(const uint32_t)>>("featureSizes",
+ get_golden_input_p232_208_array_size(audioSizeInput));
+
+ size_t headerNumBytes = 4 + 12 + 4; /* Filename length, filename (12 for p232_208.wav), dump size. */
+ size_t footerNumBytes = 4; /* Eof value. */
+ size_t memDumpMaxLenBytes = headerNumBytes + footerNumBytes + oneInferenceOutSizeBytes * numInf;
+
+ std::vector<uint8_t > memDump(memDumpMaxLenBytes);
+ size_t undefMemDumpBytesWritten = 0;
+ caseContext.Set<size_t>("MEM_DUMP_LEN", memDumpMaxLenBytes);
+ caseContext.Set<uint8_t*>("MEM_DUMP_BASE_ADDR", memDump.data());
+ caseContext.Set<size_t*>("MEM_DUMP_BYTE_WRITTEN", &undefMemDumpBytesWritten);
+
+ /* Inference. */
+ REQUIRE(arm::app::NoiseReductionHandler(caseContext, false));
+
+ /* The expected output after post-processing. */
+ std::vector<int16_t> golden(&ofms[infIndex][0], &ofms[infIndex][0] + g_FrameLength);
+
+ size_t startOfLastInfOut = undefMemDumpBytesWritten - oneInferenceOutSizeBytes;
+
+ /* The actual result from the usecase handler. */
+ std::vector<int16_t> runtime(g_FrameLength);
+ std::memcpy(runtime.data(), &memDump[startOfLastInfOut], oneInferenceOutSizeBytes);
+
+ /* Margin of 22 is ~0.03% of the int16 full-scale range (22/65536). */
+ REQUIRE_THAT(golden, Catch::Matchers::Approx(runtime).margin(22));
+ }
+ ++infIndex;
+ }
+}
+
+TEST_CASE("Inference by index - one inference", "[RNNoise]")
+{
+ auto totalAudioSize = get_audio_array_size(1);
+ REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
+
+ /* Run 1 inference */
+ std::vector<uint32_t> numberOfInferences = {1};
+ testInfByIndex(numberOfInferences);
+}
+
+TEST_CASE("Inference by index - several inferences", "[RNNoise]")
+{
+ auto totalAudioSize = get_audio_array_size(1);
+ REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
+
+ /* 3 different inference amounts: 1, 2 and all inferences required to cover total feature set */
+ uint32_t totalInferences = totalAudioSize / g_FrameLength;
+ std::vector<uint32_t> numberOfInferences = {1, 2, totalInferences};
+ testInfByIndex(numberOfInferences);
+}
diff --git a/tests/use_case/noise_reduction/RNNUCTestCaseData.hpp b/tests/use_case/noise_reduction/RNNUCTestCaseData.hpp
new file mode 100644
index 0000000..37bc6a5
--- /dev/null
+++ b/tests/use_case/noise_reduction/RNNUCTestCaseData.hpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef RNNUC_TEST_DATA
+#define RNNUC_TEST_DATA
+
+#include <cstdint>
+
+/* 1st inference denoised output. */
+int16_t denoisedInf0 [480] = {
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, -0x1, -0x1, -0x1, -0x1, -0x1,
+ -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, 0x0, -0x1,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1,
+ -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1, -0x1,
+ -0x1, -0x1, -0x1, -0x1, 0x0, -0x1, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x0, 0x1, 0x1, 0x0, 0x1, 0x0, 0x1, 0x1, 0x0,
+ 0x1, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x2, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
+ 0x2, 0x2, 0x2, 0x3, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3,
+ 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, 0x3, 0x3,
+ 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
+ 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
+ 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3,
+ 0x3, 0x3, 0x2, 0x3, 0x3, 0x3, 0x3, 0x2, 0x3, 0x2,
+ 0x3, 0x3, 0x2, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x2,
+ 0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0x0,
+ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1,
+ 0x1, 0x1, 0x2, 0x2, 0x3, 0x3, 0x3, 0x4, 0x4, 0x4,
+ 0x4, 0x5, 0x4, 0x4, 0x5, 0x4, 0x5, 0x4, 0x4, 0x4,
+ 0x3, 0x3, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1,
+ 0x1, 0x2, 0x2, 0x3, 0x3, 0x4, 0x5, 0x5, 0x7, 0x7,
+ 0x8, 0x8, 0x9, 0xa, 0x9, 0xa, 0xa, 0xb, 0xa, 0xa,
+ 0xb, 0xa, 0xa, 0x9, 0xa, 0xa, 0x9, 0xa, 0x8, 0x9,
+ 0x9, 0x9, 0x9, 0x8, 0xa, 0x9, 0xa, 0xb, 0xb, 0xc,
+ 0xc, 0xe, 0xf, 0xf, 0x11, 0x11, 0x13, 0x13, 0x14, 0x15,
+ 0x14, 0x16, 0x14, 0x14, 0x12, 0x11, 0x10, 0xd, 0xd, 0xb,
+ 0xb, 0xb, 0xa, 0xc, 0xb, 0xd, 0xd, 0xe, 0x11, 0x11,
+ 0x14, 0x15, 0x17, 0x19, 0x1a, 0x1d, 0x1c, 0x1c, 0x1b, 0x1a,
+};
+
+/* 2nd inference denoised output. */
+int16_t denoisedInf1 [480] = {
+ 0x11, 0x17, 0x29, 0x23, 0x33, 0x43, 0x3f, 0x53, 0x52, 0x4b,
+ 0x46, 0x32, 0x27, 0x13, -0x2, -0x1c, -0x2f, -0x2f, -0x2e, -0x2c,
+ -0x34, -0x31, -0x2f, -0x34, -0x30, -0x4a, -0x38, -0x18, -0x25, -0x1a,
+ -0x15, -0x14, -0x12, -0x1d, -0x21, -0x2f, -0x32, -0x36, -0x33, -0x29,
+ -0x31, -0x23, -0x26, -0x30, -0x2b, -0x38, -0x2e, -0x22, -0x36, -0x53,
+ -0x60, -0x5a, -0x62, -0x6c, -0x84, -0xa1, -0xa0, -0xb1, -0xc2, -0xb0,
+ -0xa9, -0x9c, -0x85, -0x97, -0xa2, -0x99, -0x9f, -0x9e, -0xa4, -0xa6,
+ -0x97, -0x92, -0x8e, -0x9f, -0xb1, -0xb6, -0xbf, -0xc4, -0xcc, -0xae,
+ -0x91, -0x8a, -0x7f, -0x8a, -0x84, -0x8e, -0x98, -0x88, -0xa5, -0x9f,
+ -0x97, -0xa2, -0x8e, -0x97, -0x88, -0x76, -0x7c, -0x7c, -0x91, -0x85,
+ -0x82, -0x88, -0x78, -0x78, -0x5f, -0x55, -0x48, -0x3c, -0x4b, -0x2f,
+ -0x3a, -0x48, -0x31, -0x3b, -0x21, -0xc, -0x18, -0x16, -0x29, -0x2e,
+ -0x30, -0x39, -0x39, -0x3f, -0x30, -0x2f, -0x3b, -0x30, -0x33, -0x31,
+ -0x29, -0x38, -0x3d, -0x36, -0x3e, -0x48, -0x46, -0x46, -0x3a, -0x39,
+ -0x42, -0x3a, -0x44, -0x52, -0x53, -0x60, -0x60, -0x66, -0x6d, -0x5b,
+ -0x53, -0x47, -0x35, -0x2b, -0x24, -0x26, -0x24, -0x20, -0x20, -0x26,
+ -0x23, -0x17, -0xf, -0x6, -0xb, -0xc, -0x22, -0x39, -0x21, -0x25,
+ -0x21, -0x17, -0x23, -0x10, -0x24, -0x2b, -0x31, -0x5c, -0x43, -0x42,
+ -0x53, -0x33, -0x19, -0x14, -0x28, -0x29, -0x33, -0x36, -0x29, -0x46,
+ -0x3c, -0x35, -0x3e, -0x30, -0x49, -0x52, -0x55, -0x5f, -0x56, -0x50,
+ -0x47, -0x4b, -0x4f, -0x5e, -0x5e, -0x47, -0x56, -0x4f, -0x37, -0x27,
+ -0x15, -0x10, 0x6, 0x15, 0x2b, 0x36, 0x31, 0x45, 0x47, 0x53,
+ 0x4d, 0x3f, 0x55, 0x53, 0x5d, 0x65, 0x5a, 0x55, 0x45, 0x40,
+ 0x39, 0x35, 0x32, 0x35, 0x44, 0x36, 0x3d, 0x4b, 0x4c, 0x51,
+ 0x4c, 0x5a, 0x5b, 0x60, 0x69, 0x58, 0x53, 0x3f, 0x22, -0x1,
+ -0x21, -0x20, -0x2a, -0x30, -0x2c, -0x2a, -0x2f, -0x34, -0x28, -0x30,
+ -0x31, -0x2d, -0x29, -0x1d, -0x2b, -0x23, -0x1c, -0x20, -0x13, -0x12,
+ -0x9, -0x18, -0x1d, -0x17, -0x2c, -0x24, -0x26, -0x2e, -0x29, -0x3c,
+ -0x46, -0x51, -0x62, -0x74, -0x80, -0x88, -0x9d, -0xa4, -0xac, -0xa1,
+ -0x92, -0x8c, -0x6f, -0x65, -0x53, -0x42, -0x4b, -0x3a, -0x35, -0x44,
+ -0x44, -0x46, -0x5c, -0x6f, -0x77, -0x8d, -0x90, -0x96, -0xa3, -0x9c,
+ -0xa8, -0xa1, -0x8e, -0x7e, -0x5d, -0x50, -0x40, -0x35, -0x36, -0x30,
+ -0x3a, -0x32, -0x2b, -0x34, -0x33, -0x40, -0x51, -0x51, -0x4a, -0x47,
+ -0x35, -0x20, -0x19, -0xa, -0xd, -0x1b, -0x15, -0x19, -0x22, -0x1f,
+ -0x1c, -0x21, -0x21, -0x17, -0x1e, -0x1d, -0x4, 0x4, 0xd, 0x24,
+ 0x2c, 0x3d, 0x54, 0x50, 0x58, 0x5f, 0x5d, 0x64, 0x56, 0x5b,
+ 0x67, 0x60, 0x76, 0x7d, 0x77, 0x8b, 0x96, 0x9b, 0x9e, 0xa3,
+ 0xa8, 0x9d, 0x9a, 0x9a, 0x87, 0x78, 0x64, 0x49, 0x44, 0x38,
+ 0x11, -0x11, -0x24, -0x29, -0x35, -0x3f, -0x35, -0x32, -0x20, -0x1a,
+ -0x2a, -0x1d, -0x28, -0x3a, -0x3f, -0x53, -0x56, -0x5e, -0x59, -0x41,
+ -0x40, -0x2e, -0x22, -0x1a, 0x7, 0x19, 0x27, 0x32, 0x37, 0x38,
+ 0x23, 0x11, -0x7, -0x1f, -0x29, -0x36, -0x34, -0x35, -0x2f, -0xb,
+ 0xb, 0x14, 0x25, 0x3f, 0x51, 0x49, 0x54, 0x6a, 0x5f, 0x5b,
+ 0x66, 0x5d, 0x59, 0x4f, 0x3a, 0x3b, 0x30, 0x2f, 0x2d, 0x1b,
+ 0x2f, 0x2e, 0x28, 0x3a, 0x2c, 0x37, 0x47, 0x4c, 0x5e, 0x58,
+ 0x52, 0x4b, 0x45, 0x43, 0x36, 0x3f, 0x42, 0x49, 0x54, 0x4e,
+ 0x61, 0x60, 0x59, 0x6b, 0x65, 0x60, 0x5e, 0x4e, 0x3d, 0x2e,
+ 0x2a, 0x2c, 0x2f, 0x2b, 0x30, 0x3d, 0x47, 0x57, 0x61, 0x6d,
+};
+
+/* Final denoised results after 134 steps */
+int16_t denoisedInf2 [480] = {
+ -0x66, -0x8a, -0x8a, -0x6f, -0x99, -0x9c, -0x92, -0xbf, -0xa4, -0xb1,
+ -0xf0, -0xf1, -0xf3, -0xe5, -0xf9, -0x107, -0xd2, -0xe8, -0x100, -0xdb,
+ -0xda, -0xec, -0xfa, -0xfd, -0xe7, -0xd6, -0xe6, -0xfd, -0x102, -0xfc,
+ -0xfd, -0x11f, -0x123, -0x119, -0x11c, -0xf6, -0x10a, -0x130, -0x10f, -0x107,
+ -0x106, -0x10e, -0x11f, -0xff, -0xed, -0xf3, -0xee, -0xfb, -0x10f, -0x108,
+ -0xe9, -0xd4, -0xda, -0xe7, -0xed, -0xf0, -0xf1, -0x10c, -0xff, -0xd3,
+ -0xfb, -0xed, -0xc9, -0x107, -0xe4, -0xbb, -0xe9, -0xeb, -0xf6, -0xfb,
+ -0x114, -0x12e, -0x105, -0x116, -0x134, -0x138, -0x149, -0x12a, -0x11a, -0x13c,
+ -0x151, -0x13f, -0x13a, -0x16f, -0x176, -0x15d, -0x16d, -0x169, -0x163, -0x170,
+ -0x176, -0x181, -0x17d, -0x173, -0x18b, -0x1af, -0x1ad, -0x185, -0x18c, -0x1b0,
+ -0x1aa, -0x1b9, -0x1c0, -0x1b7, -0x1d5, -0x1d7, -0x1ca, -0x1cd, -0x1e8, -0x1f3,
+ -0x1c6, -0x1cd, -0x1c2, -0x191, -0x1a2, -0x1a3, -0x193, -0x187, -0x19b, -0x1b0,
+ -0x184, -0x199, -0x1bb, -0x1a9, -0x196, -0x18c, -0x1b7, -0x1b0, -0x19d, -0x1b9,
+ -0x1b2, -0x1c2, -0x1d1, -0x1dd, -0x1ce, -0x1a6, -0x1cf, -0x1e4, -0x1dc, -0x1c9,
+ -0x1bc, -0x1e2, -0x1c8, -0x1c7, -0x1d5, -0x1c1, -0x1dc, -0x1bd, -0x1cd, -0x1fe,
+ -0x1d7, -0x1e6, -0x1f3, -0x1f3, -0x201, -0x1f0, -0x1f8, -0x1f0, -0x1f4, -0x206,
+ -0x1f3, -0x206, -0x20d, -0x1f5, -0x1e1, -0x1d5, -0x1fe, -0x214, -0x1f4, -0x1f3,
+ -0x21a, -0x232, -0x214, -0x203, -0x20b, -0x1fc, -0x1f9, -0x1ef, -0x1e5, -0x1ef,
+ -0x1de, -0x1dd, -0x1ea, -0x1f2, -0x219, -0x21d, -0x201, -0x1ff, -0x1fa, -0x205,
+ -0x21f, -0x215, -0x210, -0x217, -0x20c, -0x21f, -0x223, -0x202, -0x208, -0x21f,
+ -0x233, -0x22f, -0x221, -0x229, -0x233, -0x239, -0x218, -0x21d, -0x242, -0x22e,
+ -0x23d, -0x239, -0x22f, -0x251, -0x238, -0x22e, -0x22e, -0x234, -0x236, -0x1fc,
+ -0x220, -0x254, -0x241, -0x249, -0x250, -0x260, -0x25e, -0x244, -0x24c, -0x267,
+ -0x268, -0x25d, -0x272, -0x24e, -0x245, -0x275, -0x259, -0x254, -0x251, -0x252,
+ -0x27e, -0x251, -0x23f, -0x25b, -0x24c, -0x254, -0x270, -0x274, -0x265, -0x267,
+ -0x265, -0x274, -0x27f, -0x25c, -0x279, -0x282, -0x266, -0x281, -0x271, -0x264,
+ -0x26e, -0x262, -0x262, -0x267, -0x270, -0x25e, -0x260, -0x276, -0x269, -0x273,
+ -0x286, -0x282, -0x27d, -0x27d, -0x282, -0x292, -0x289, -0x25e, -0x263, -0x253,
+ -0x22b, -0x24a, -0x26d, -0x27c, -0x263, -0x251, -0x269, -0x256, -0x25d, -0x263,
+ -0x259, -0x26b, -0x267, -0x26e, -0x267, -0x267, -0x265, -0x24f, -0x277, -0x25e,
+ -0x24d, -0x28e, -0x26b, -0x251, -0x25b, -0x256, -0x26f, -0x256, -0x245, -0x25c,
+ -0x266, -0x26d, -0x266, -0x260, -0x25f, -0x265, -0x25d, -0x254, -0x26b, -0x257,
+ -0x252, -0x27d, -0x270, -0x265, -0x274, -0x25a, -0x24d, -0x25b, -0x258, -0x255,
+ -0x256, -0x25c, -0x260, -0x247, -0x24b, -0x25a, -0x24e, -0x250, -0x23b, -0x234,
+ -0x254, -0x242, -0x22b, -0x241, -0x247, -0x231, -0x22a, -0x223, -0x20c, -0x212,
+ -0x219, -0x209, -0x203, -0x203, -0x200, -0x205, -0x217, -0x212, -0x205, -0x20c,
+ -0x1ec, -0x1ef, -0x20d, -0x1f2, -0x1ee, -0x1f3, -0x1eb, -0x1e4, -0x1ca, -0x1c6,
+ -0x1b7, -0x1b2, -0x1d4, -0x1d9, -0x1b7, -0x199, -0x1b7, -0x1c7, -0x1a5, -0x199,
+ -0x18d, -0x1a7, -0x1c0, -0x1a9, -0x1b6, -0x1a7, -0x17f, -0x18c, -0x186, -0x172,
+ -0x173, -0x178, -0x192, -0x190, -0x16d, -0x174, -0x17f, -0x179, -0x173, -0x15b,
+ -0x167, -0x17b, -0x16b, -0x169, -0x15c, -0x160, -0x16c, -0x156, -0x159, -0x151,
+ -0x13f, -0x147, -0x13f, -0x144, -0x133, -0x116, -0x12b, -0x134, -0x120, -0x118,
+ -0x115, -0x110, -0x114, -0x125, -0x128, -0x11f, -0x112, -0xfb, -0xf1, -0xe9,
+ -0xc2, -0xa7, -0xb3, -0xc3, -0xbf, -0x9f, -0x96, -0xa6, -0xa8, -0xb6,
+ -0xa8, -0x8e, -0xa6, -0xb9, -0xb1, -0x9e, -0x96, -0x80, -0x69, -0x6a,
+ -0x55, -0x5b, -0x67, -0x69, -0x7b, -0x5d, -0x67, -0x6a, -0x48, -0x66,
+ -0x50, -0x37, -0x41, -0x42, -0x45, -0x1a, -0x23, -0x33, -0x27, -0x3a,
+ -0x1b, -0xf, -0x4, 0x2, -0x12, 0x8, -0x11, 0x7, 0x29, 0x8,
+};
+
+static int16_t* ofms[3] = {denoisedInf0, denoisedInf1, denoisedInf2};
+
+#endif /* RNNUC_TEST_DATA */
\ No newline at end of file
diff --git a/tests/use_case/noise_reduction/RNNoiseModelTests.cc b/tests/use_case/noise_reduction/RNNoiseModelTests.cc
new file mode 100644
index 0000000..705c41a
--- /dev/null
+++ b/tests/use_case/noise_reduction/RNNoiseModelTests.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RNNoiseModel.hpp"
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "TestData_noise_reduction.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+bool RunInference(arm::app::Model& model, std::vector<int8_t> vec,
+ const size_t sizeRequired, const size_t dataInputIndex)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(dataInputIndex);
+ REQUIRE(inputTensor);
+ size_t copySz = inputTensor->bytes < sizeRequired ? inputTensor->bytes : sizeRequired;
+ const int8_t* vecData = vec.data();
+ memcpy(inputTensor->data.data, vecData, copySz);
+ return model.RunInference();
+}
+
+void genRandom(size_t bytes, std::vector<int8_t>& randomAudio)
+{
+ randomAudio.resize(bytes);
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist {-128, 127};
+ auto gen = [&dist, &mersenneGen](){
+ return dist(mersenneGen);
+ };
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+}
+
+bool RunInferenceRandom(arm::app::Model& model, const size_t dataInputIndex)
+{
+ std::array<size_t, 4> inputSizes = {IFM_0_DATA_SIZE, IFM_1_DATA_SIZE, IFM_2_DATA_SIZE, IFM_3_DATA_SIZE};
+ std::vector<int8_t> randomAudio;
+ TfLiteTensor* inputTensor = model.GetInputTensor(dataInputIndex);
+ REQUIRE(inputTensor);
+ genRandom(inputTensor->bytes, randomAudio);
+
+ REQUIRE(RunInference(model, randomAudio, inputSizes[dataInputIndex], dataInputIndex));
+ return true;
+}
+
+TEST_CASE("Running random inference with TensorFlow Lite Micro and RNNoiseModel Int8", "[RNNoise]")
+{
+ arm::app::RNNoiseModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ model.ResetGruState();
+
+ for (int i = 1; i < 4; i++ ) {
+ TfLiteTensor* inputGruStateTensor = model.GetInputTensor(i);
+ auto* inputGruState = tflite::GetTensorData<int8_t>(inputGruStateTensor);
+ for (size_t tIndex = 0; tIndex < inputGruStateTensor->bytes; tIndex++) {
+ REQUIRE(inputGruState[tIndex] == arm::app::GetTensorQuantParams(inputGruStateTensor).offset);
+ }
+ }
+
+ REQUIRE(RunInferenceRandom(model, 0));
+}
+
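+/* Test-only subclass exposing RNNoiseModel's GRU state copy routine and state map for verification. */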
+class TestRNNoiseModel : public arm::app::RNNoiseModel
+{
+public:
+ bool CopyGruStatesTest() {
+ return RNNoiseModel::CopyGruStates();
+ }
+
+ std::vector<std::pair<size_t, size_t>> GetStateMap() {
+ return m_gruStateMap;
+ }
+
+};
+
+template <class T>
+void printArray(size_t dataSz, T data){
+ char strhex[8];
+ std::string strdump;
+
+ for (size_t i = 0; i < dataSz; ++i) {
+ if (0 == i % 8) {
+ printf("%s\n\t", strdump.c_str());
+ strdump.clear();
+ }
+ snprintf(strhex, sizeof(strhex) - 1,
+ "0x%02x, ", data[i]);
+ strdump += std::string(strhex);
+ }
+
+ if (!strdump.empty()) {
+ printf("%s\n", strdump.c_str());
+ }
+}
+
+/* This is true for gcc x86 platform, not guaranteed for other compilers and platforms. */
+TEST_CASE("Test initial GRU out state is 0", "[RNNoise]")
+{
+ TestRNNoiseModel model{};
+ model.Init();
+
+ auto map = model.GetStateMap();
+
+ for(auto& mapping: map) {
+ TfLiteTensor* gruOut = model.GetOutputTensor(mapping.first);
+ auto* outGruState = tflite::GetTensorData<uint8_t>(gruOut);
+
+ printf("gru out state:");
+ printArray(gruOut->bytes, outGruState);
+
+ for (size_t tIndex = 0; tIndex < gruOut->bytes; tIndex++) {
+ REQUIRE(outGruState[tIndex] == 0);
+ }
+ }
+
+}
+
+TEST_CASE("Test GRU state copy", "[RNNoise]")
+{
+ TestRNNoiseModel model{};
+ model.Init();
+ REQUIRE(RunInferenceRandom(model, 0));
+
+ auto map = model.GetStateMap();
+
+ std::vector<std::vector<uint8_t>> oldStates;
+ for(auto& mapping: map) {
+
+ TfLiteTensor* gruOut = model.GetOutputTensor(mapping.first);
+ auto* outGruState = tflite::GetTensorData<uint8_t>(gruOut);
+ /* Save old output state. */
+ std::vector<uint8_t> oldState(gruOut->bytes);
+ memcpy(oldState.data(), outGruState, gruOut->bytes);
+ oldStates.push_back(oldState);
+ }
+
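+ /* Copy GRU output states back to their mapped input tensors, then verify the inputs hold the saved output values. */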
+ model.CopyGruStatesTest();
+ auto statesIter = oldStates.begin();
+ for(auto& mapping: map) {
+ TfLiteTensor* gruInput = model.GetInputTensor(mapping.second);
+ auto* inGruState = tflite::GetTensorData<uint8_t>(gruInput);
+ for (size_t tIndex = 0; tIndex < gruInput->bytes; tIndex++) {
+ REQUIRE((*statesIter)[tIndex] == inGruState[tIndex]);
+ }
+ statesIter++;
+ }
+
+}
\ No newline at end of file
diff --git a/tests/use_case/noise_reduction/RNNoiseProcessingTests.cpp b/tests/use_case/noise_reduction/RNNoiseProcessingTests.cpp
new file mode 100644
index 0000000..24dd550
--- /dev/null
+++ b/tests/use_case/noise_reduction/RNNoiseProcessingTests.cpp
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RNNoiseProcess.hpp"
+#include <catch.hpp>
+#include <limits>
+
+
+/* Elements [0:480] from p232_113.wav cast as fp32. */
+const std::vector<float> testWav0 = std::vector<float>{
+ -1058.0, -768.0, -737.0, -1141.0, -1015.0, -315.0, -205.0, -105.0, -150.0, 277.0,
+ 424.0, 523.0, 431.0, 256.0, 441.0, 830.0, 413.0, 421.0, 1002.0, 1186.0,
+ 926.0, 841.0, 894.0, 1419.0, 1427.0, 1102.0, 587.0, 455.0, 962.0, 904.0,
+ 504.0, -61.0, 242.0, 534.0, 407.0, -344.0, -973.0, -1178.0, -1056.0, -1454.0,
+ -1294.0, -1729.0, -2234.0, -2164.0, -2148.0, -1967.0, -2699.0, -2923.0, -2408.0, -2304.0,
+ -2567.0, -2894.0, -3104.0, -3045.0, -3210.0, -3774.0, -4159.0, -3902.0, -3525.0, -3652.0,
+ -3804.0, -3493.0, -3034.0, -2715.0, -2599.0, -2432.0, -2045.0, -1934.0, -1966.0, -2018.0,
+ -1757.0, -1296.0, -1336.0, -1124.0, -1282.0, -1001.0, -601.0, -706.0, -511.0, 278.0,
+ 678.0, 1009.0, 1088.0, 1150.0, 1815.0, 2572.0, 2457.0, 2150.0, 2566.0, 2720.0,
+ 3040.0, 3203.0, 3353.0, 3536.0, 3838.0, 3808.0, 3672.0, 3346.0, 3281.0, 3570.0,
+ 3215.0, 2684.0, 3153.0, 3167.0, 3049.0, 2837.0, 2965.0, 3167.0, 3286.0, 2572.0,
+ 1952.0, 1434.0, 1398.0, 505.0, -740.0, -898.0, -598.0, -1047.0, -1514.0, -1756.0,
+ -1457.0, -1518.0, -1497.0, -1605.0, -1364.0, -1332.0, -1306.0, -2361.0, -2809.0, -2185.0,
+ -1323.0, -1714.0, -2323.0, -1888.0, -1273.0, -1208.0, -1656.0, -1543.0, -736.0, -772.0,
+ -1113.0, -1001.0, -185.0, 468.0, 625.0, 609.0, 1080.0, 1654.0, 1678.0, 1462.0,
+ 1468.0, 2065.0, 2266.0, 1779.0, 1513.0, 1646.0, 1721.0, 2019.0, 1212.0, 688.0,
+ 1256.0, 1917.0, 2104.0, 1714.0, 1581.0, 2013.0, 1946.0, 2276.0, 2419.0, 2546.0,
+ 2229.0, 1768.0, 1691.0, 1484.0, 914.0, 591.0, -279.0, 85.0, -190.0, -647.0,
+ -1120.0, -1636.0, -2057.0, -2177.0, -1650.0, -1826.0, -2206.0, -2568.0, -2374.0, -2227.0,
+ -2013.0, -1844.0, -2079.0, -1953.0, -1609.0, -1897.0, -2185.0, -2320.0, -2212.0, -2593.0,
+ -3077.0, -2840.0, -2081.0, -1642.0, -1793.0, -1437.0, -870.0, -451.0, -242.0, -267.0,
+ 314.0, 641.0, 448.0, 721.0, 1087.0, 1720.0, 1831.0, 1381.0, 1254.0, 1873.0,
+ 2504.0, 2496.0, 2265.0, 2396.0, 2703.0, 2933.0, 3100.0, 3423.0, 3464.0, 3846.0,
+ 3890.0, 3959.0, 4047.0, 4058.0, 4327.0, 3907.0, 3505.0, 3837.0, 3471.0, 3490.0,
+ 2991.0, 3129.0, 3082.0, 2950.0, 2329.0, 1964.0, 1523.0, 1179.0, 673.0, 439.0,
+ -130.0, -878.0, -1670.0, -1648.0, -1566.0, -1721.0, -2028.0, -2308.0, -1826.0, -2027.0,
+ -2221.0, -2025.0, -1858.0, -1966.0, -2384.0, -2221.0, -1936.0, -1747.0, -2159.0, -2265.0,
+ -2186.0, -1536.0, -1520.0, -1838.0, -1919.0, -1630.0, -1450.0, -1751.0, -2751.0, -3125.0,
+ -3258.0, -3049.0, -3199.0, -3272.0, -2498.0, -1884.0, -1660.0, -1894.0, -1208.0, -736.0,
+ -346.0, -337.0, -628.0, -274.0, 71.0, 245.0, 255.0, 132.0, 433.0, 229.0,
+ 345.0, -85.0, 221.0, 278.0, 227.0, -107.0, -613.0, -215.0, -448.0, -306.0,
+ -845.0, -456.0, -390.0, -239.0, -895.0, -1151.0, -619.0, -554.0, -495.0, -1141.0,
+ -1079.0, -1342.0, -1252.0, -1668.0, -2177.0, -2478.0, -2116.0, -2163.0, -2343.0, -2380.0,
+ -2269.0, -1541.0, -1668.0, -2034.0, -2264.0, -2200.0, -2224.0, -2578.0, -2213.0, -2069.0,
+ -1774.0, -1437.0, -1845.0, -1812.0, -1654.0, -1492.0, -1914.0, -1944.0, -1870.0, -2477.0,
+ -2538.0, -2298.0, -2143.0, -2146.0, -2311.0, -1777.0, -1193.0, -1206.0, -1254.0, -743.0,
+ -84.0, -129.0, -469.0, -679.0, -114.0, 352.0, 239.0, 93.0, 381.0, 543.0,
+ 283.0, 196.0, -460.0, -443.0, -307.0, -445.0, -979.0, -1095.0, -1050.0, -1172.0,
+ -967.0, -1246.0, -1217.0, -1830.0, -2167.0, -2712.0, -2778.0, -2980.0, -3055.0, -3839.0,
+ -4253.0, -4163.0, -4240.0, -4487.0, -4861.0, -5019.0, -4875.0, -4883.0, -5109.0, -5022.0,
+ -4438.0, -4639.0, -4509.0, -4761.0, -4472.0, -4841.0, -4910.0, -5264.0, -4743.0, -4802.0,
+ -4617.0, -4302.0, -4367.0, -3968.0, -3632.0, -3434.0, -4356.0, -4329.0, -3850.0, -3603.0,
+ -3654.0, -4229.0, -4262.0, -3681.0, -3026.0, -2570.0, -2486.0, -1859.0, -1264.0, -1145.0,
+ -1064.0, -1125.0, -855.0, -400.0, -469.0, -498.0, -691.0, -475.0, -528.0, -809.0,
+ -948.0, -1047.0, -1250.0, -1691.0, -2110.0, -2790.0, -2818.0, -2589.0, -2415.0, -2710.0,
+ -2744.0, -2767.0, -2506.0, -2285.0, -2361.0, -2103.0, -2336.0, -2341.0, -2687.0, -2667.0,
+ -2925.0, -2761.0, -2816.0, -2644.0, -2456.0, -2186.0, -2092.0, -2498.0, -2773.0, -2554.0,
+ -2218.0, -2626.0, -2996.0, -3119.0, -2574.0, -2582.0, -3009.0, -2876.0, -2747.0, -2999.0
+};
+
+/* Elements [480:960] from p232_113.wav cast as fp32. */
+const std::vector<float> testWav1 = std::vector<float>{
+ -2918.0, -2418.0, -2452.0, -2172.0, -2261.0, -2337.0, -2399.0, -2209.0, -2269.0, -2509.0,
+ -2721.0, -2884.0, -2891.0, -3440.0, -3757.0, -4338.0, -4304.0, -4587.0, -4714.0, -5686.0,
+ -5699.0, -5447.0, -5008.0, -5052.0, -5135.0, -4807.0, -4515.0, -3850.0, -3804.0, -3813.0,
+ -3451.0, -3527.0, -3764.0, -3627.0, -3527.0, -3737.0, -4043.0, -4394.0, -4672.0, -4561.0,
+ -4718.0, -4737.0, -5018.0, -5187.0, -5043.0, -4734.0, -4841.0, -5363.0, -5870.0, -5697.0,
+ -5731.0, -6081.0, -6557.0, -6306.0, -6422.0, -5990.0, -5738.0, -5559.0, -5880.0, -6093.0,
+ -6718.0, -6853.0, -6966.0, -6907.0, -6887.0, -7046.0, -6902.0, -6927.0, -6754.0, -6891.0,
+ -6630.0, -6381.0, -5877.0, -5858.0, -6237.0, -6129.0, -6248.0, -6297.0, -6717.0, -6731.0,
+ -5888.0, -5239.0, -5635.0, -5808.0, -5418.0, -4780.0, -4311.0, -4082.0, -4053.0, -3274.0,
+ -3214.0, -3194.0, -3206.0, -2407.0, -1824.0, -1753.0, -1908.0, -1865.0, -1535.0, -1246.0,
+ -1434.0, -1970.0, -1890.0, -1815.0, -1949.0, -2296.0, -2356.0, -1972.0, -2156.0, -2057.0,
+ -2189.0, -1861.0, -1640.0, -1456.0, -1641.0, -1786.0, -1781.0, -1880.0, -1918.0, -2251.0,
+ -2256.0, -2608.0, -3169.0, -2983.0, -2785.0, -2948.0, -3267.0, -3856.0, -3847.0, -3534.0,
+ -3799.0, -4028.0, -4438.0, -4509.0, -4343.0, -3913.0, -3752.0, -3709.0, -3302.0, -2612.0,
+ -2848.0, -3320.0, -3049.0, -2171.0, -2342.0, -2746.0, -2618.0, -2031.0, -1166.0, -1454.0,
+ -995.0, -156.0, 573.0, 1240.0, 506.0, 296.0, 524.0, 581.0, 212.0, -191.0,
+ 169.0, -46.0, 17.0, 221.0, 586.0, 347.0, 40.0, 217.0, 951.0, 694.0,
+ 191.0, -535.0, -260.0, 252.0, 187.0, -230.0, -541.0, -124.0, -59.0, -1152.0,
+ -1397.0, -1176.0, -1195.0, -2218.0, -2960.0, -2338.0, -1895.0, -2460.0, -3599.0, -3728.0,
+ -2896.0, -2672.0, -4025.0, -4322.0, -3625.0, -3066.0, -3599.0, -4989.0, -5005.0, -3988.0,
+ -3153.0, -3921.0, -4349.0, -4444.0, -3526.0, -2896.0, -3810.0, -4252.0, -3300.0, -2234.0,
+ -2044.0, -3229.0, -2959.0, -2542.0, -1821.0, -1561.0, -1853.0, -2112.0, -1361.0, -831.0,
+ -840.0, -999.0, -1021.0, -769.0, -388.0, -377.0, -513.0, -790.0, -938.0, -911.0,
+ -1654.0, -1809.0, -2326.0, -1879.0, -1956.0, -2241.0, -2307.0, -1900.0, -1620.0, -2265.0,
+ -2170.0, -1257.0, -681.0, -1552.0, -2405.0, -2443.0, -1941.0, -1774.0, -2245.0, -2652.0,
+ -2769.0, -2622.0, -2714.0, -3558.0, -4449.0, -4894.0, -4583.0, -5179.0, -6471.0, -6526.0,
+ -5918.0, -5153.0, -5770.0, -6250.0, -5532.0, -4751.0, -4810.0, -5519.0, -5661.0, -5028.0,
+ -4737.0, -5482.0, -5837.0, -5005.0, -4200.0, -4374.0, -4962.0, -5199.0, -4464.0, -4106.0,
+ -4783.0, -5151.0, -4588.0, -4137.0, -3936.0, -4954.0, -4582.0, -3855.0, -2912.0, -2867.0,
+ -2965.0, -2919.0, -2362.0, -1800.0, -2025.0, -1931.0, -1438.0, -979.0, -1124.0, -1124.0,
+ -1130.0, -781.0, -652.0, -814.0, -976.0, -1269.0, -1052.0, -551.0, -724.0, -947.0,
+ -934.0, -856.0, -705.0, -894.0, -916.0, -861.0, -487.0, -681.0, -493.0, -902.0,
+ -547.0, -466.0, -1013.0, -1466.0, -2178.0, -1907.0, -1618.0, -2169.0, -3226.0, -2973.0,
+ -2390.0, -2227.0, -3257.0, -4297.0, -4227.0, -3022.0, -3017.0, -4268.0, -4956.0, -4199.0,
+ -3099.0, -3627.0, -4820.0, -4666.0, -3475.0, -2648.0, -3613.0, -4521.0, -3942.0, -3083.0,
+ -2832.0, -3912.0, -4289.0, -3684.0, -2728.0, -2702.0, -3279.0, -2636.0, -2261.0, -2170.0,
+ -2346.0, -2500.0, -1894.0, -1745.0, -1849.0, -2078.0, -2170.0, -1608.0, -1027.0, -1350.0,
+ -1330.0, -1128.0, -478.0, -1113.0, -1584.0, -1656.0, -1636.0, -1678.0, -1726.0, -1554.0,
+ -1434.0, -1243.0, -748.0, -463.0, -277.0, 216.0, 517.0, 1063.0, 1101.0, 839.0,
+ 724.0, 543.0, 713.0, 598.0, 806.0, 499.0, 612.0, 385.0, 830.0, 939.0,
+ 602.0, 60.0, -378.0, -300.0, -308.0, -1079.0, -1461.0, -997.0, -855.0, -1087.0,
+ -1579.0, -1314.0, -742.0, -452.0, -327.0, 224.0, -46.0, -119.0, -339.0, -22.0,
+ 172.0, -137.0, 196.0, -89.0, 34.0, -324.0, -281.0, -999.0, -1134.0, -516.0,
+ 101.0, 321.0, -584.0, -231.0, 1254.0, 1744.0, 1175.0, 684.0, 842.0, 1439.0,
+ 1507.0, 829.0, 296.0, 519.0, 716.0, 961.0, 175.0, -494.0, -501.0, -628.0,
+ -658.0, -700.0, -989.0, -1342.0, -1298.0, -1347.0, -1223.0, -1388.0, -1308.0, -1184.0,
+ -468.0, -2.0, -444.0, -388.0, -80.0, 361.0, 700.0, 120.0, 101.0, 464.0,
+ 654.0, 40.0, -586.0, -607.0, -730.0, -705.0, -844.0, -692.0, -1032.0, -1216.0
+};
+
+/* Golden RNNoise pre-processing output for [0:480] p232_113.wav */
+const std::vector<float> RNNoisePreProcessGolden0 {
+ 4.597353, -0.908727, 1.067204, -0.034760, -0.084974,
+ -0.361086, -1.494876, -0.173461, -0.671268, 0.245229,
+ 0.371219, 0.159632, 0.230595, 0.245066, 0.148395,
+ -0.660396, -0.157954, 0.136425, 0.062801, -0.049542,
+ 0.179730, 0.178653, 4.597353, -0.908727, 1.067204,
+ -0.034760, -0.084974, -0.361086, 4.597353, -0.908727,
+ 1.067204, -0.034760, -0.084974, -0.361086, -1.437083,
+ -0.722769, -0.232802, -0.178104, -0.431379, -0.591088,
+ -0.930000, 1.257937
+};
+
+/* Golden RNNoise pre-processing output for [480:960] p232_113.wav */
+const std::vector<float> RNNoisePreProcessGolden1 {
+ 11.031052, -1.249548, 2.498929, 0.492149, 0.364215,
+ 0.138582, -0.846219, 0.279253, -0.526596, 0.610061,
+ 0.820483, 0.293216, -0.047377, -0.178503, 0.229638,
+ -0.516174, 0.149612, 0.100330, 0.010542, 0.028561,
+ -0.037554, -0.094355, 6.433699, -0.340821, 1.431725,
+ 0.526909, 0.449189, 0.499668, -2.761007, 1.476633,
+ -0.702682, 0.596430, 0.619138, 1.221840, -0.739308,
+ -0.490715, -0.085385, 0.035244, 0.104252, -0.192160,
+ -0.810000, -0.430191
+};
+
+
+const std::vector<float> RNNoisePostProcessDenoiseGolden0 {
+ 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -1, 0, 0, 0, 0, -1, 0, -1, 0, 0,
+ -1, 0, -1, -1, -1, -1, -1, -1, 0, -1,
+ -1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 4, 5,
+ 5, 5, 5, 5, 5, 4, 5, 4, 4, 4,
+ 4, 4, 3, 4, 3, 3, 3, 2, 3, 2,
+ 2, 2, 1, 1, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, -1,
+ 0, -1, 0, -1, -1, -1, -1, -1, -2, -1,
+ -1, -2, -1, -2, -1, -2, -2, -1, -2, -1,
+ -2, -1, -1, -1, 0, -1, 0, -1, 0, 0,
+ -1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 1, 0, 1, 1, 1, 2, 1,
+ 2, 2, 2, 3, 2, 3, 2, 3, 3, 3,
+ 4, 3, 4, 3, 3, 4, 3, 5, 4, 4,
+ 4, 4, 5, 4, 5, 4, 4, 5, 4, 5,
+ 4, 4, 4, 3, 4, 3, 4, 3, 2, 3,
+ 1, 2, 0, 0, 0, 0, 0, -1, 0, -1,
+ -2, -1, -3, -1, -3, -2, -2, -3, -2, -3,
+ -2, -3, -3, -2, -4, -2, -3, -3, -3, -4,
+ -3, -4, -3, -4, -5, -4, -6, -4, -6, -6,
+ -5, -7, -5, -7, -6, -6, -7, -6, -8, -6,
+ -7, -7, -6, -8, -6, -8, -6, -7, -8, -6,
+ -9, -7, -8, -8, -7, -9, -7, -9, -8, -8,
+ -8, -6, -8, -5, -6, -5, -3, -3, 0, -1,
+ 1, 2, 3, 7, 6, 10, 11, 13, 16, 15,
+ 20, 19, 23, 24, 25, 28, 27, 31, 31, 32,
+ 34, 32, 35, 33, 35, 35, 34, 36, 33, 35,
+ 33, 32, 33, 30, 31, 28, 28, 27, 24, 25,
+ 20, 21, 18, 16, 15, 11, 12, 8, 8, 7,
+ 4, 6, 1, 3, 1, 0, 2, 0, 2, 0,
+ 0, 1, 0, 3, 0, 3, 1, 1, 4, 0,
+ 4, 1, 3, 3, 1, 4, 0, 3, 1, 0,
+ 2, -1, 1, -1, -1, 0, -3, 0, -3, 0,
+ -1, -1, 2, 0, 5, 4, 7, 11, 11, 18,
+ 15, 21, 23, 24, 31, 29, 38, 37, 42, 46,
+ 45, 54, 51, 59, 60, 61, 68, 62, 70, 66,
+ 68, 73, 69, 79, 73, 79, 76, 70, 75, 61,
+ 71, 64, 74, 85, 70, 86, 51, 92, 73
+};
+
+
+std::vector<float> RNNoiseModelOutputGolden0{0.157920, 0.392021, 0.368438, 0.258663, 0.202650,
+ 0.256764, 0.185472, 0.149062, 0.147317, 0.142133,
+ 0.148236, 0.173523, 0.197672, 0.200920, 0.198408,
+ 0.147500, 0.140215, 0.166651, 0.250242, 0.256278,
+ 0.252104, 0.241938};
+
+TEST_CASE("RNNoise preprocessing calculation test", "[RNNoise]")
+{
+ SECTION("FP32")
+ {
+ arm::app::rnn::RNNoiseProcess rnnoiseProcessor;
+ arm::app::rnn::FrameFeatures features;
+
+ rnnoiseProcessor.PreprocessFrame(testWav0.data(), testWav0.size(), features);
+ REQUIRE_THAT( features.m_featuresVec,
+ Catch::Approx( RNNoisePreProcessGolden0 ).margin(0.1));
+ rnnoiseProcessor.PreprocessFrame(testWav1.data(), testWav1.size(), features);
+ REQUIRE_THAT( features.m_featuresVec,
+ Catch::Approx( RNNoisePreProcessGolden1 ).margin(0.1));
+ }
+}
+
+
+TEST_CASE("RNNoise postprocessing test", "[RNNoise]")
+{
+ arm::app::rnn::RNNoiseProcess rnnoiseProcessor;
+ arm::app::rnn::FrameFeatures p;
+ rnnoiseProcessor.PreprocessFrame(testWav0.data(), testWav0.size(), p);
+ std::vector<float> denoised(testWav0.size());
+ rnnoiseProcessor.PostProcessFrame(RNNoiseModelOutputGolden0, p, denoised);
+
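+ /* Round the denoised float samples to whole numbers so they can be compared against the integer golden reference. */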
+ std::vector<float> denoisedRoundedInt;
+
+ denoisedRoundedInt.reserve(denoised.size());
+ for(auto i:denoised){
+ denoisedRoundedInt.push_back(static_cast<float>(static_cast<int>(std::roundf(i))));
+ }
+
+ REQUIRE_THAT( denoisedRoundedInt, Catch::Approx( RNNoisePostProcessDenoiseGolden0 ).margin(1));
+}
\ No newline at end of file
diff --git a/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc b/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
index 3a42dde..82fea9f 100644
--- a/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
+++ b/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
@@ -28,9 +28,9 @@ bool RunInference(arm::app::Model& model, const int8_t* imageData)
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
inputTensor->bytes :
- IFM_DATA_SIZE;
+ IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, imageData, copySz);
@@ -52,7 +52,7 @@ void TestInference(int imageIdx,arm::app::Model& model) {
TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);