Diffstat (limited to 'tests')
-rw-r--r--  tests/use_case/ad/InferenceTestAD.cc                         |  35
-rw-r--r--  tests/use_case/asr/InferenceTestWav2Letter.cc                |  39
-rw-r--r--  tests/use_case/img_class/InferenceTestMobilenetV2.cc         |  44
-rw-r--r--  tests/use_case/kws/InferenceTestMicroNetKws.cc               |  44
-rw-r--r--  tests/use_case/kws_asr/InferenceTestMicroNetKws.cc           | 140
-rw-r--r--  tests/use_case/kws_asr/InferenceTestWav2Letter.cc            | 147
-rw-r--r--  tests/use_case/noise_reduction/InferenceTestRNNoise.cc       |  54
-rw-r--r--  tests/use_case/noise_reduction/RNNNoiseUCTests.cc            |  85
-rw-r--r--  tests/use_case/object_detection/InferenceTestYoloFastest.cc  | 108
-rw-r--r--  tests/use_case/vww/InferenceVisualWakeWordModelTests.cc      |  24
10 files changed, 358 insertions(+), 362 deletions(-)
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
index e02e923..4991a30 100644
--- a/tests/use_case/ad/InferenceTestAD.cc
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,10 +19,10 @@
#include <random>
#include "AdModel.hpp"
+#include "BufAttributes.hpp"
+#include "TensorFlowLiteMicro.hpp"
#include "TestData_ad.hpp"
#include "log_macros.h"
-#include "TensorFlowLiteMicro.hpp"
-#include "BufAttributes.hpp"
#ifndef AD_FEATURE_VEC_DATA_SIZE
#define AD_IN_FEATURE_VEC_DATA_SIZE (1024)
@@ -42,10 +42,12 @@ using namespace test;
bool RunInference(arm::app::Model& model, const int8_t vec[])
{
- TfLiteTensor *inputTensor = model.GetInputTensor(0);
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < AD_IN_FEATURE_VEC_DATA_SIZE ? inputTensor->bytes : AD_IN_FEATURE_VEC_DATA_SIZE;
+ const size_t copySz = inputTensor->bytes < AD_IN_FEATURE_VEC_DATA_SIZE
+ ? inputTensor->bytes
+ : AD_IN_FEATURE_VEC_DATA_SIZE;
memcpy(inputTensor->data.data, vec, copySz);
@@ -54,16 +56,14 @@ bool RunInference(arm::app::Model& model, const int8_t vec[])
bool RunInferenceRandom(arm::app::Model& model)
{
- TfLiteTensor *inputTensor = model.GetInputTensor(0);
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
std::random_device rndDevice;
std::mt19937 mersenneGen{rndDevice()};
std::uniform_int_distribution<short> dist{-128, 127};
- auto gen = [&dist, &mersenneGen]() {
- return dist(mersenneGen);
- };
+ auto gen = [&dist, &mersenneGen]() { return dist(mersenneGen); };
std::vector<int8_t> randomInput(inputTensor->bytes);
std::generate(std::begin(randomInput), std::end(randomInput), gen);
@@ -73,19 +73,18 @@ bool RunInferenceRandom(arm::app::Model& model)
}
template <typename T>
-void TestInference(const T *input_goldenFV, const T *output_goldenFV, arm::app::Model& model)
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
{
REQUIRE(RunInference(model, static_cast<const T*>(input_goldenFV)));
- TfLiteTensor *outputTensor = model.GetOutputTensor(0);
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
REQUIRE(outputTensor);
REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
- for (size_t i = 0; i < outputTensor->bytes; i++)
- {
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
}
}
@@ -107,9 +106,10 @@ TEST_CASE("Running random inference with TensorFlow Lite Micro and AdModel Int8"
TEST_CASE("Running golden vector inference with TensorFlow Lite Micro and AdModel Int8", "[AD]")
{
REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
- auto input_goldenFV = get_ifm_data_array(i);;
- auto output_goldenFV = get_ofm_data_array(i);
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
+ auto input_goldenFV = GetIfmDataArray(i);
+ ;
+ auto output_goldenFV = GetOfmDataArray(i);
DYNAMIC_SECTION("Executing inference with re-init")
{
@@ -123,7 +123,6 @@ TEST_CASE("Running golden vector inference with TensorFlow Lite Micro and AdMode
REQUIRE(model.IsInited());
TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
-
}
}
}
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
index e6012c3..991617c 100644
--- a/tests/use_case/asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "BufAttributes.hpp"
#include "TensorFlowLiteMicro.hpp"
-#include "Wav2LetterModel.hpp"
#include "TestData_asr.hpp"
-#include "BufAttributes.hpp"
+#include "Wav2LetterModel.hpp"
#include <catch.hpp>
#include <random>
@@ -51,11 +51,9 @@ bool RunInferenceRandom(arm::app::Model& model)
std::random_device rndDevice;
std::mt19937 mersenneGen{rndDevice()};
- std::uniform_int_distribution<short> dist {-128, 127};
+ std::uniform_int_distribution<short> dist{-128, 127};
- auto gen = [&dist, &mersenneGen](){
- return dist(mersenneGen);
- };
+ auto gen = [&dist, &mersenneGen]() { return dist(mersenneGen); };
std::vector<int8_t> randomAudio(inputTensor->bytes);
std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
@@ -64,21 +62,22 @@ bool RunInferenceRandom(arm::app::Model& model)
return true;
}
-TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter]")
+TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8",
+ "[Wav2Letter]")
{
arm::app::Wav2LetterModel model{};
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::asr::GetModelPointer(),
- arm::app::asr::GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::asr::GetModelPointer(),
+ arm::app::asr::GetModelLen()));
REQUIRE(model.IsInited());
REQUIRE(RunInferenceRandom(model));
}
-template<typename T>
+template <typename T>
void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
{
TfLiteTensor* inputTensor = model.GetInputTensor(0);
@@ -101,9 +100,10 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
{
REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
- auto input_goldenFV = get_ifm_data_array(i);;
- auto output_goldenFV = get_ofm_data_array(i);
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
+ auto input_goldenFV = GetIfmDataArray(i);
+ ;
+ auto output_goldenFV = GetOfmDataArray(i);
DYNAMIC_SECTION("Executing inference with re-init")
{
@@ -111,13 +111,12 @@ TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]"
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::asr::GetModelPointer(),
- arm::app::asr::GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::asr::GetModelPointer(),
+ arm::app::asr::GetModelLen()));
REQUIRE(model.IsInited());
TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
-
}
}
}
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index 7921959..09eba00 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,22 +14,22 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "BufAttributes.hpp"
#include "ImageUtils.hpp"
#include "MobileNetModel.hpp"
#include "TensorFlowLiteMicro.hpp"
#include "TestData_img_class.hpp"
-#include "BufAttributes.hpp"
#include <catch.hpp>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- namespace img_class {
- extern uint8_t* GetModelPointer();
- extern size_t GetModelLen();
- } /* namespace img_class */
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace img_class {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace img_class */
+} /* namespace app */
} /* namespace arm */
using namespace test;
@@ -39,22 +39,22 @@ bool RunInference(arm::app::Model& model, const int8_t imageData[])
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
- inputTensor->bytes :
- IFM_0_DATA_SIZE;
+ const size_t copySz =
+ inputTensor->bytes < IFM_0_DATA_SIZE ? inputTensor->bytes : IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, imageData, copySz);
- if(model.IsDataSigned()){
+ if (model.IsDataSigned()) {
arm::app::image::ConvertImgToInt8(inputTensor->data.data, copySz);
}
return model.RunInference();
}
-template<typename T>
-void TestInference(int imageIdx, arm::app::Model& model, T tolerance) {
- auto image = get_ifm_data_array(imageIdx);
- auto goldenFV = get_ofm_data_array(imageIdx);
+template <typename T>
+void TestInference(int imageIdx, arm::app::Model& model, T tolerance)
+{
+ auto image = GetIfmDataArray(imageIdx);
+ auto goldenFV = GetOfmDataArray(imageIdx);
REQUIRE(RunInference(model, image));
@@ -66,11 +66,11 @@ void TestInference(int imageIdx, arm::app::Model& model, T tolerance) {
REQUIRE(tensorData);
for (size_t i = 0; i < outputTensor->bytes; i++) {
- REQUIRE(static_cast<int>(tensorData[i]) == Approx(static_cast<int>((T)goldenFV[i])).epsilon(tolerance));
+ REQUIRE(static_cast<int>(tensorData[i]) ==
+ Approx(static_cast<int>((T)goldenFV[i])).epsilon(tolerance));
}
}
-
TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "[MobileNetV2]")
{
SECTION("Executing inferences sequentially")
@@ -84,12 +84,12 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "
arm::app::img_class::GetModelLen()));
REQUIRE(model.IsInited());
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
TestInference<uint8_t>(i, model, 1);
}
}
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
DYNAMIC_SECTION("Executing inference with re-init")
{
arm::app::MobileNetModel model{};
diff --git a/tests/use_case/kws/InferenceTestMicroNetKws.cc b/tests/use_case/kws/InferenceTestMicroNetKws.cc
index 27c6f96..ace6684 100644
--- a/tests/use_case/kws/InferenceTestMicroNetKws.cc
+++ b/tests/use_case/kws/InferenceTestMicroNetKws.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "BufAttributes.hpp"
#include "MicroNetKwsModel.hpp"
-#include "TestData_kws.hpp"
#include "TensorFlowLiteMicro.hpp"
-#include "BufAttributes.hpp"
+#include "TestData_kws.hpp"
#include <catch.hpp>
#include <random>
@@ -39,9 +39,8 @@ bool RunInference(arm::app::Model& model, const int8_t vec[])
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
- inputTensor->bytes :
- IFM_0_DATA_SIZE;
+ const size_t copySz =
+ inputTensor->bytes < IFM_0_DATA_SIZE ? inputTensor->bytes : IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, vec, copySz);
return model.RunInference();
@@ -54,11 +53,9 @@ bool RunInferenceRandom(arm::app::Model& model)
std::random_device rndDevice;
std::mt19937 mersenneGen{rndDevice()};
- std::uniform_int_distribution<short> dist {-128, 127};
+ std::uniform_int_distribution<short> dist{-128, 127};
- auto gen = [&dist, &mersenneGen](){
- return dist(mersenneGen);
- };
+ auto gen = [&dist, &mersenneGen]() { return dist(mersenneGen); };
std::vector<int8_t> randomAudio(inputTensor->bytes);
std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
@@ -67,7 +64,7 @@ bool RunInferenceRandom(arm::app::Model& model)
return true;
}
-template<typename T>
+template <typename T>
void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
{
REQUIRE(RunInference(model, input_goldenFV));
@@ -84,15 +81,16 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
}
}
-TEST_CASE("Running random inference with TensorFlow Lite Micro and MicroNetKwsModel Int8", "[MicroNetKws]")
+TEST_CASE("Running random inference with TensorFlow Lite Micro and MicroNetKwsModel Int8",
+ "[MicroNetKws]")
{
arm::app::MicroNetKwsModel model{};
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::kws::GetModelPointer(),
- arm::app::kws::GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::kws::GetModelPointer(),
+ arm::app::kws::GetModelLen()));
REQUIRE(model.IsInited());
REQUIRE(RunInferenceRandom(model));
@@ -101,9 +99,10 @@ TEST_CASE("Running random inference with TensorFlow Lite Micro and MicroNetKwsMo
TEST_CASE("Running inference with TensorFlow Lite Micro and MicroNetKwsModel int8", "[MicroNetKws]")
{
REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
- const int8_t* input_goldenFV = get_ifm_data_array(i);;
- const int8_t* output_goldenFV = get_ofm_data_array(i);
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
+ const int8_t* input_goldenFV = GetIfmDataArray(i);
+ ;
+ const int8_t* output_goldenFV = GetOfmDataArray(i);
DYNAMIC_SECTION("Executing inference with re-init " << i)
{
@@ -111,13 +110,12 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and MicroNetKwsModel int
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::kws::GetModelPointer(),
- arm::app::kws::GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::kws::GetModelPointer(),
+ arm::app::kws::GetModelLen()));
REQUIRE(model.IsInited());
TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
-
}
}
}
diff --git a/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc b/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
index 9fd8171..4cfd784 100644
--- a/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
+++ b/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "BufAttributes.hpp"
#include "MicroNetKwsModel.hpp"
-#include "TestData_kws.hpp"
#include "TensorFlowLiteMicro.hpp"
-#include "BufAttributes.hpp"
+#include "TestData_kws.hpp"
#include <catch.hpp>
#include <random>
@@ -35,87 +35,89 @@ namespace app {
namespace test {
namespace kws {
-bool RunInference(arm::app::Model& model, const int8_t vec[]) {
- TfLiteTensor* inputTensor = model.GetInputTensor(0);
- REQUIRE(inputTensor);
-
- const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
- inputTensor->bytes :
- IFM_0_DATA_SIZE;
- memcpy(inputTensor->data.data, vec, copySz);
-
- return model.RunInference();
-}
+ bool RunInference(arm::app::Model& model, const int8_t vec[])
+ {
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
-bool RunInferenceRandom(arm::app::Model& model) {
- TfLiteTensor* inputTensor = model.GetInputTensor(0);
- REQUIRE(inputTensor);
+ const size_t copySz =
+ inputTensor->bytes < IFM_0_DATA_SIZE ? inputTensor->bytes : IFM_0_DATA_SIZE;
+ memcpy(inputTensor->data.data, vec, copySz);
- std::random_device rndDevice;
- std::mt19937 mersenneGen{rndDevice()};
- std::uniform_int_distribution<short> dist{-128, 127};
-
- auto gen = [&dist, &mersenneGen]() {
- return dist(mersenneGen);
- };
-
- std::vector<int8_t> randomAudio(inputTensor->bytes);
- std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+ return model.RunInference();
+ }
- REQUIRE(RunInference(model, randomAudio.data()));
- return true;
-}
+ bool RunInferenceRandom(arm::app::Model& model)
+ {
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
-template<typename T>
-void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model) {
- REQUIRE(RunInference(model, input_goldenFV));
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist{-128, 127};
- TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+ auto gen = [&dist, &mersenneGen]() { return dist(mersenneGen); };
- REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
- auto tensorData = tflite::GetTensorData<T>(outputTensor);
- REQUIRE(tensorData);
+ std::vector<int8_t> randomAudio(inputTensor->bytes);
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
- for (size_t i = 0; i < outputTensor->bytes; i++) {
- REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>((T) output_goldenFV[i]));
+ REQUIRE(RunInference(model, randomAudio.data()));
+ return true;
}
-}
-TEST_CASE("Running random inference with Tflu and MicroNetKwsModel Int8", "[MicroNetKws]") {
- arm::app::MicroNetKwsModel model{};
+ template <typename T>
+ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+ {
+ REQUIRE(RunInference(model, input_goldenFV));
- REQUIRE_FALSE(model.IsInited());
- REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::kws::GetModelPointer(),
- arm::app::kws::GetModelLen()));
- REQUIRE(model.IsInited());
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
- REQUIRE(RunInferenceRandom(model));
-}
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
-TEST_CASE("Running inference with Tflu and MicroNetKwsModel Int8", "[MicroNetKws]") {
- REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
- for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
- const int8_t* input_goldenFV = get_ifm_data_array(i);
- const int8_t* output_goldenFV = get_ofm_data_array(i);
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
+ REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>((T)output_goldenFV[i]));
+ }
+ }
- DYNAMIC_SECTION("Executing inference with re-init") {
- arm::app::MicroNetKwsModel model{};
+ TEST_CASE("Running random inference with Tflu and MicroNetKwsModel Int8", "[MicroNetKws]")
+ {
+ arm::app::MicroNetKwsModel model{};
- REQUIRE_FALSE(model.IsInited());
- REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::kws::GetModelPointer(),
- arm::app::kws::GetModelLen()));
- REQUIRE(model.IsInited());
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init(arm::app::tensorArena,
+ sizeof(arm::app::tensorArena),
+ arm::app::kws::GetModelPointer(),
+ arm::app::kws::GetModelLen()));
+ REQUIRE(model.IsInited());
- TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+ REQUIRE(RunInferenceRandom(model));
+ }
+ TEST_CASE("Running inference with Tflu and MicroNetKwsModel Int8", "[MicroNetKws]")
+ {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
+ const int8_t* input_goldenFV = GetIfmDataArray(i);
+ const int8_t* output_goldenFV = GetOfmDataArray(i);
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::MicroNetKwsModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init(arm::app::tensorArena,
+ sizeof(arm::app::tensorArena),
+ arm::app::kws::GetModelPointer(),
+ arm::app::kws::GetModelLen()));
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+ }
}
}
-}
-} //namespace
-} //namespace
+} // namespace kws
+} // namespace test
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 72dcadc..b49b886 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include "BufAttributes.hpp"
#include "TensorFlowLiteMicro.hpp"
-#include "Wav2LetterModel.hpp"
#include "TestData_asr.hpp"
-#include "BufAttributes.hpp"
+#include "Wav2LetterModel.hpp"
#include <catch.hpp>
#include <random>
@@ -35,94 +35,91 @@ namespace app {
namespace test {
namespace asr {
-bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
-{
- TfLiteTensor* inputTensor = model.GetInputTensor(0);
- REQUIRE(inputTensor);
+ bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+ {
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
- memcpy(inputTensor->data.data, vec, copySz);
+ memcpy(inputTensor->data.data, vec, copySz);
- return model.RunInference();
-}
-
-bool RunInferenceRandom(arm::app::Model& model)
-{
- TfLiteTensor* inputTensor = model.GetInputTensor(0);
- REQUIRE(inputTensor);
+ return model.RunInference();
+ }
- std::random_device rndDevice;
- std::mt19937 mersenneGen{rndDevice()};
- std::uniform_int_distribution<short> dist {-128, 127};
+ bool RunInferenceRandom(arm::app::Model& model)
+ {
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
- auto gen = [&dist, &mersenneGen](){
- return dist(mersenneGen);
- };
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist{-128, 127};
- std::vector<int8_t> randomAudio(inputTensor->bytes);
- std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+ auto gen = [&dist, &mersenneGen]() { return dist(mersenneGen); };
- REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
- return true;
-}
+ std::vector<int8_t> randomAudio(inputTensor->bytes);
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
-TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
-{
- arm::app::Wav2LetterModel model{};
+ REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+ return true;
+ }
- REQUIRE_FALSE(model.IsInited());
- REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::asr::GetModelPointer(),
- arm::app::asr::GetModelLen()));
- REQUIRE(model.IsInited());
+ TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
+ {
+ arm::app::Wav2LetterModel model{};
- REQUIRE(RunInferenceRandom(model));
-}
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init(arm::app::tensorArena,
+ sizeof(arm::app::tensorArena),
+ arm::app::asr::GetModelPointer(),
+ arm::app::asr::GetModelLen()));
+ REQUIRE(model.IsInited());
+ REQUIRE(RunInferenceRandom(model));
+ }
-template<typename T>
-void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
-{
- TfLiteTensor* inputTensor = model.GetInputTensor(0);
- REQUIRE(inputTensor);
+ template <typename T>
+ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+ {
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
- REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+ REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
- TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
- REQUIRE(outputTensor);
- REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
- auto tensorData = tflite::GetTensorData<T>(outputTensor);
- REQUIRE(tensorData);
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
- for (size_t i = 0; i < outputTensor->bytes; i++) {
- REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
+ REQUIRE(static_cast<int>(tensorData[i]) == static_cast<int>(((T)output_goldenFV[i])));
+ }
}
-}
-
-TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
-{
- REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
- auto input_goldenFV = get_ifm_data_array(i);;
- auto output_goldenFV = get_ofm_data_array(i);
-
- DYNAMIC_SECTION("Executing inference with re-init")
- {
- arm::app::Wav2LetterModel model{};
-
- REQUIRE_FALSE(model.IsInited());
- REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- arm::app::asr::GetModelPointer(),
- arm::app::asr::GetModelLen()));
- REQUIRE(model.IsInited());
-
- TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+ TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
+ {
+ REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES);
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
+ auto input_goldenFV = GetIfmDataArray(i);
+ ;
+ auto output_goldenFV = GetOfmDataArray(i);
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::Wav2LetterModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init(arm::app::tensorArena,
+ sizeof(arm::app::tensorArena),
+ arm::app::asr::GetModelPointer(),
+ arm::app::asr::GetModelLen()));
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+ }
}
}
-}
-} //namespace
-} //namespace
+} // namespace asr
+} // namespace test
diff --git a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
index 3cdaee1..17ce9ac 100644
--- a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
+++ b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,10 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
#include "RNNoiseModel.hpp"
+#include "TensorFlowLiteMicro.hpp"
#include "TestData_noise_reduction.hpp"
-#include "BufAttributes.hpp"
#include <catch.hpp>
#include <random>
@@ -50,15 +50,13 @@ namespace noise_reduction {
{
std::random_device rndDevice;
std::mt19937 mersenneGen{rndDevice()};
- std::uniform_int_distribution<short> dist {-128, 127};
+ std::uniform_int_distribution<short> dist{-128, 127};
- auto gen = [&dist, &mersenneGen](){
- return dist(mersenneGen);
- };
+ auto gen = [&dist, &mersenneGen]() { return dist(mersenneGen); };
std::vector<std::vector<int8_t>> randomInput{NUMBER_OF_IFM_FILES};
for (size_t i = 0; i < model.GetNumInputs(); ++i) {
- TfLiteTensor *inputTensor = model.GetInputTensor(i);
+ TfLiteTensor* inputTensor = model.GetInputTensor(i);
REQUIRE(inputTensor);
randomInput[i].resize(inputTensor->bytes);
std::generate(std::begin(randomInput[i]), std::end(randomInput[i]), gen);
@@ -82,8 +80,10 @@ namespace noise_reduction {
REQUIRE(RunInferenceRandom(model));
}
- template<typename T>
- void TestInference(const std::vector<std::vector<T>> input_goldenFV, const std::vector<std::vector<T>> output_goldenFV, arm::app::Model& model)
+ template <typename T>
+ void TestInference(const std::vector<std::vector<T>> input_goldenFV,
+ const std::vector<std::vector<T>> output_goldenFV,
+ arm::app::Model& model)
{
for (size_t i = 0; i < model.GetNumInputs(); ++i) {
TfLiteTensor* inputTensor = model.GetInputTensor(i);
@@ -93,41 +93,37 @@ namespace noise_reduction {
REQUIRE(RunInference(model, input_goldenFV));
for (size_t i = 0; i < model.GetNumOutputs(); ++i) {
- TfLiteTensor *outputTensor = model.GetOutputTensor(i);
+ TfLiteTensor* outputTensor = model.GetOutputTensor(i);
REQUIRE(outputTensor);
auto tensorData = tflite::GetTensorData<T>(outputTensor);
REQUIRE(tensorData);
for (size_t j = 0; j < outputTensor->bytes; j++) {
- REQUIRE(static_cast<int>(tensorData[j]) == static_cast<int>((output_goldenFV[i][j])));
+ REQUIRE(static_cast<int>(tensorData[j]) ==
+ static_cast<int>((output_goldenFV[i][j])));
}
}
}
TEST_CASE("Running inference with Tflu and RNNoise Int8", "[RNNoise]")
{
- std::vector<std::vector<int8_t>> goldenInputFV {NUMBER_OF_IFM_FILES};
- std::vector<std::vector<int8_t>> goldenOutputFV {NUMBER_OF_OFM_FILES};
+ std::vector<std::vector<int8_t>> goldenInputFV{NUMBER_OF_IFM_FILES};
+ std::vector<std::vector<int8_t>> goldenOutputFV{NUMBER_OF_OFM_FILES};
- std::array<size_t, NUMBER_OF_IFM_FILES> inputSizes = {IFM_0_DATA_SIZE,
- IFM_1_DATA_SIZE,
- IFM_2_DATA_SIZE,
- IFM_3_DATA_SIZE};
+ std::array<size_t, NUMBER_OF_IFM_FILES> inputSizes = {
+ IFM_0_DATA_SIZE, IFM_1_DATA_SIZE, IFM_2_DATA_SIZE, IFM_3_DATA_SIZE};
- std::array<size_t, NUMBER_OF_OFM_FILES> outputSizes = {OFM_0_DATA_SIZE,
- OFM_1_DATA_SIZE,
- OFM_2_DATA_SIZE,
- OFM_3_DATA_SIZE,
- OFM_4_DATA_SIZE};
+ std::array<size_t, NUMBER_OF_OFM_FILES> outputSizes = {
+ OFM_0_DATA_SIZE, OFM_1_DATA_SIZE, OFM_2_DATA_SIZE, OFM_3_DATA_SIZE, OFM_4_DATA_SIZE};
- for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
+ for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) {
goldenInputFV[i].resize(inputSizes[i]);
- std::memcpy(goldenInputFV[i].data(), get_ifm_data_array(i), inputSizes[i]);
+ std::memcpy(goldenInputFV[i].data(), GetIfmDataArray(i), inputSizes[i]);
}
- for (uint32_t i = 0 ; i < NUMBER_OF_OFM_FILES; ++i) {
+ for (uint32_t i = 0; i < NUMBER_OF_OFM_FILES; ++i) {
goldenOutputFV[i].resize(outputSizes[i]);
- std::memcpy(goldenOutputFV[i].data(), get_ofm_data_array(i), outputSizes[i]);
+ std::memcpy(goldenOutputFV[i].data(), GetOfmDataArray(i), outputSizes[i]);
}
DYNAMIC_SECTION("Executing inference with re-init")
@@ -146,4 +142,4 @@ namespace noise_reduction {
}
} /* namespace noise_reduction */
-} /* namespace test */
+} /* namespace test */
diff --git a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
index d835c21..7f9ff19 100644
--- a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
+++ b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,13 +14,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "RNNoiseModel.hpp"
-#include "UseCaseHandler.hpp"
+#include "BufAttributes.hpp"
#include "InputFiles.hpp"
+#include "Profiler.hpp"
#include "RNNUCTestCaseData.hpp"
-#include "BufAttributes.hpp"
+#include "RNNoiseModel.hpp"
+#include "UseCaseHandler.hpp"
#include "hal.h"
-#include "Profiler.hpp"
#include <catch.hpp>
@@ -34,13 +34,13 @@ namespace app {
} /* namespace app */
} /* namespace arm */
-#define PLATFORM hal_platform_init();
+#define PLATFORM hal_platform_init();
-#define CONTEXT \
-arm::app::ApplicationContext caseContext; \
-arm::app::Profiler profiler{"noise_reduction"}; \
-caseContext.Set<arm::app::Profiler&>("profiler", profiler); \
-caseContext.Set<arm::app::RNNoiseModel&>("model", model);
+#define CONTEXT \
+ arm::app::ApplicationContext caseContext; \
+ arm::app::Profiler profiler{"noise_reduction"}; \
+ caseContext.Set<arm::app::Profiler&>("profiler", profiler); \
+ caseContext.Set<arm::app::RNNoiseModel&>("model", model);
TEST_CASE("Verify output tensor memory dump")
{
@@ -56,12 +56,12 @@ TEST_CASE("Verify output tensor memory dump")
/* Populate the output tensors */
const size_t numOutputs = model.GetNumOutputs();
- size_t sizeToWrite = 0;
- size_t lastTensorSize = model.GetOutputTensor(numOutputs - 1)->bytes;
+ size_t sizeToWrite = 0;
+ size_t lastTensorSize = model.GetOutputTensor(numOutputs - 1)->bytes;
for (size_t i = 0; i < numOutputs; ++i) {
TfLiteTensor* tensor = model.GetOutputTensor(i);
- auto* tData = tflite::GetTensorData<uint8_t>(tensor);
+ auto* tData = tflite::GetTensorData<uint8_t>(tensor);
if (tensor->bytes > 0) {
memset(tData, static_cast<uint8_t>(i), tensor->bytes);
@@ -69,7 +69,6 @@ TEST_CASE("Verify output tensor memory dump")
}
}
-
SECTION("Positive use case")
{
/* Run the memory dump */
@@ -80,7 +79,7 @@ TEST_CASE("Verify output tensor memory dump")
size_t k = 0;
for (size_t i = 0; i < numOutputs && k < memPool.size(); ++i) {
TfLiteTensor* tensor = model.GetOutputTensor(i);
- auto* tData = tflite::GetTensorData<uint8_t>(tensor);
+ auto* tData = tflite::GetTensorData<uint8_t>(tensor);
for (size_t j = 0; j < tensor->bytes && k < memPool.size(); ++j) {
REQUIRE(tData[j] == memPool[k++]);
@@ -126,28 +125,31 @@ TEST_CASE("Inference run all clips", "[RNNoise]")
REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
}
-std::function<uint32_t(const uint32_t)> get_golden_input_p232_208_array_size(const uint32_t numberOfFeatures) {
+std::function<uint32_t(const uint32_t)>
+get_golden_input_p232_208_array_size(const uint32_t numberOfFeatures)
+{
- return [numberOfFeatures](const uint32_t) -> uint32_t{
- return numberOfFeatures;
- };
+ return [numberOfFeatures](const uint32_t) -> uint32_t { return numberOfFeatures; };
}
-const char* get_test_filename(const uint32_t idx) {
- auto name = get_filename(idx);
+const char* get_test_filename(const uint32_t idx)
+{
+ auto name = GetFilename(idx);
REQUIRE(std::string("p232_208.wav") == name);
return "p232_208.wav";
}
-void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
+void testInfByIndex(std::vector<uint32_t>& numberOfInferences)
+{
PLATFORM
arm::app::RNNoiseModel model;
CONTEXT
- caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", get_audio_array);
- caseContext.Set<std::function<const char* (const uint32_t)>>("featureFileNames", get_test_filename);
+ caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", GetAudioArray);
+ caseContext.Set<std::function<const char*(const uint32_t)>>("featureFileNames",
+ get_test_filename);
caseContext.Set<uint32_t>("frameLength", arm::app::rnn::g_FrameLength);
caseContext.Set<uint32_t>("frameStride", arm::app::rnn::g_FrameStride);
caseContext.Set<uint32_t>("numInputFeatures", arm::app::rnn::g_NumInputFeatures);
@@ -160,18 +162,21 @@ void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
size_t oneInferenceOutSizeBytes = arm::app::rnn::g_FrameLength * sizeof(int16_t);
auto infIndex = 0;
- for (auto numInf: numberOfInferences) {
- DYNAMIC_SECTION("Number of features: "<< numInf) {
- caseContext.Set<uint32_t>("clipIndex", 1); /* Only getting p232_208.wav for tests. */
+ for (auto numInf : numberOfInferences) {
+ DYNAMIC_SECTION("Number of features: " << numInf)
+ {
+ caseContext.Set<uint32_t>("clipIndex", 1); /* Only getting p232_208.wav for tests. */
uint32_t audioSizeInput = numInf * arm::app::rnn::g_FrameLength;
- caseContext.Set<std::function<uint32_t(const uint32_t)>>("featureSizes",
- get_golden_input_p232_208_array_size(audioSizeInput));
+ caseContext.Set<std::function<uint32_t(const uint32_t)>>(
+ "featureSizes", get_golden_input_p232_208_array_size(audioSizeInput));
- size_t headerNumBytes = 4 + 12 + 4; /* Filename length, filename (12 for p232_208.wav), dump size. */
- size_t footerNumBytes = 4; /* Eof value. */
- size_t memDumpMaxLenBytes = headerNumBytes + footerNumBytes + oneInferenceOutSizeBytes * numInf;
+ size_t headerNumBytes =
+ 4 + 12 + 4; /* Filename length, filename (12 for p232_208.wav), dump size. */
+ size_t footerNumBytes = 4; /* Eof value. */
+ size_t memDumpMaxLenBytes =
+ headerNumBytes + footerNumBytes + oneInferenceOutSizeBytes * numInf;
- std::vector<uint8_t > memDump(memDumpMaxLenBytes);
+ std::vector<uint8_t> memDump(memDumpMaxLenBytes);
size_t undefMemDumpBytesWritten = 0;
caseContext.Set<size_t>("MEM_DUMP_LEN", memDumpMaxLenBytes);
caseContext.Set<uint8_t*>("MEM_DUMP_BASE_ADDR", memDump.data());
@@ -199,8 +204,9 @@ void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
TEST_CASE("Inference by index - one inference", "[RNNoise]")
{
- auto totalAudioSize = get_audio_array_size(1);
- REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
+ auto totalAudioSize = GetAudioArraySize(1);
+ REQUIRE(64757 ==
+ totalAudioSize); /* Checking that the input file is as expected and has not changed. */
/* Run 1 inference */
std::vector<uint32_t> numberOfInferences = {1};
@@ -209,8 +215,9 @@ TEST_CASE("Inference by index - one inference", "[RNNoise]")
TEST_CASE("Inference by index - several inferences", "[RNNoise]")
{
- auto totalAudioSize = get_audio_array_size(1);
- REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
+ auto totalAudioSize = GetAudioArraySize(1);
+ REQUIRE(64757 ==
+ totalAudioSize); /* Checking that the input file is as expected and has not changed. */
/* 3 different inference amounts: 1, 2 and all inferences required to cover total feature set */
uint32_t totalInferences = totalAudioSize / arm::app::rnn::g_FrameLength;
diff --git a/tests/use_case/object_detection/InferenceTestYoloFastest.cc b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
index f1c3719..b3cf37d 100644
--- a/tests/use_case/object_detection/InferenceTestYoloFastest.cc
+++ b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,58 +14,51 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include "log_macros.h"
-#include "ImageUtils.hpp"
-#include "YoloFastestModel.hpp"
-#include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
#include "DetectorPostProcessing.hpp"
+#include "ImageUtils.hpp"
#include "InputFiles.hpp"
-#include "BufAttributes.hpp"
+#include "TensorFlowLiteMicro.hpp"
+#include "YoloFastestModel.hpp"
+#include "log_macros.h"
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- namespace object_detection {
- extern uint8_t* GetModelPointer();
- extern size_t GetModelLen();
- } /* namespace object_detection */
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace object_detection {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace object_detection */
+} /* namespace app */
} /* namespace arm */
#include <catch.hpp>
-void GetExpectedResults(std::vector<std::vector<arm::app::object_detection::DetectionResult>> &expected_results)
+void GetExpectedResults(
+ std::vector<std::vector<arm::app::object_detection::DetectionResult>>& expected_results)
{
/* Img1
0) (0.999246) -> Detection box: {x=89,y=17,w=41,h=56}
1) (0.995367) -> Detection box: {x=27,y=81,w=48,h=53}
*/
- expected_results.push_back({
- arm::app::object_detection::DetectionResult(0.99,89,17,41,56),
- arm::app::object_detection::DetectionResult(0.99,27,81,48,53)
- });
+ expected_results.push_back({arm::app::object_detection::DetectionResult(0.99, 89, 17, 41, 56),
+ arm::app::object_detection::DetectionResult(0.99, 27, 81, 48, 53)});
/* Img2
0) (0.998107) -> Detection box: {x=87,y=35,w=53,h=64}
*/
- expected_results.push_back({
- arm::app::object_detection::DetectionResult(0.99,87,35,53,64)
- });
+ expected_results.push_back({arm::app::object_detection::DetectionResult(0.99, 87, 35, 53, 64)});
/* Img3
0) (0.999244) -> Detection box: {x=105,y=73,w=58,h=66}
1) (0.985984) -> Detection box: {x=34,y=40,w=70,h=95}
*/
- expected_results.push_back({
- arm::app::object_detection::DetectionResult(0.99,105,73,58,66),
- arm::app::object_detection::DetectionResult(0.98,34,40,70,95)
- });
+ expected_results.push_back({arm::app::object_detection::DetectionResult(0.99, 105, 73, 58, 66),
+ arm::app::object_detection::DetectionResult(0.98, 34, 40, 70, 95)});
/* Img4
0) (0.993294) -> Detection box: {x=22,y=43,w=39,h=53}
1) (0.992021) -> Detection box: {x=63,y=60,w=38,h=45}
*/
- expected_results.push_back({
- arm::app::object_detection::DetectionResult(0.99,22,43,39,53),
- arm::app::object_detection::DetectionResult(0.99,63,60,38,45)
- });
+ expected_results.push_back({arm::app::object_detection::DetectionResult(0.99, 22, 43, 39, 53),
+ arm::app::object_detection::DetectionResult(0.99, 63, 60, 38, 45)});
}
bool RunInference(arm::app::Model& model, const uint8_t imageData[])
@@ -73,41 +66,43 @@ bool RunInference(arm::app::Model& model, const uint8_t imageData[])
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
- inputTensor->bytes : IMAGE_DATA_SIZE;
+ const size_t copySz =
+ inputTensor->bytes < IMAGE_DATA_SIZE ? inputTensor->bytes : IMAGE_DATA_SIZE;
- arm::app::image::RgbToGrayscale(imageData,inputTensor->data.uint8,copySz);
+ arm::app::image::RgbToGrayscale(imageData, inputTensor->data.uint8, copySz);
- if(model.IsDataSigned()){
+ if (model.IsDataSigned()) {
arm::app::image::ConvertImgToInt8(inputTensor->data.data, copySz);
}
return model.RunInference();
}
-template<typename T>
-void TestInferenceDetectionResults(int imageIdx, arm::app::Model& model, T tolerance) {
+template <typename T>
+void TestInferenceDetectionResults(int imageIdx, arm::app::Model& model, T tolerance)
+{
std::vector<arm::app::object_detection::DetectionResult> results;
- auto image = get_img_array(imageIdx);
+ auto image = GetImgArray(imageIdx);
TfLiteIntArray* inputShape = model.GetInputShape(0);
- auto nCols = inputShape->data[arm::app::YoloFastestModel::ms_inputColsIdx];
- auto nRows = inputShape->data[arm::app::YoloFastestModel::ms_inputRowsIdx];
+ auto nCols = inputShape->data[arm::app::YoloFastestModel::ms_inputColsIdx];
+ auto nRows = inputShape->data[arm::app::YoloFastestModel::ms_inputRowsIdx];
REQUIRE(RunInference(model, image));
-
std::vector<TfLiteTensor*> output_arr{model.GetOutputTensor(0), model.GetOutputTensor(1)};
- for (size_t i =0; i < output_arr.size(); i++) {
+ for (size_t i = 0; i < output_arr.size(); i++) {
REQUIRE(output_arr[i]);
REQUIRE(tflite::GetTensorData<T>(output_arr[i]));
}
- const arm::app::object_detection::PostProcessParams postProcessParams {
- nRows, nCols, arm::app::object_detection::originalImageSize,
- arm::app::object_detection::anchor1, arm::app::object_detection::anchor2
- };
+ const arm::app::object_detection::PostProcessParams postProcessParams{
+ nRows,
+ nCols,
+ arm::app::object_detection::originalImageSize,
+ arm::app::object_detection::anchor1,
+ arm::app::object_detection::anchor2};
arm::app::DetectorPostProcess postp{output_arr[0], output_arr[1], results, postProcessParams};
postp.DoPostProcess();
@@ -117,18 +112,21 @@ void TestInferenceDetectionResults(int imageIdx, arm::app::Model& model, T toler
/* Validate got the same number of boxes */
REQUIRE(results.size() == expected_results[imageIdx].size());
-
- for (int i=0; i < (int)results.size(); i++) {
+ for (int i = 0; i < (int)results.size(); i++) {
/* Validate confidence and box dimensions */
- REQUIRE(std::abs(results[i].m_normalisedVal - expected_results[imageIdx][i].m_normalisedVal) < 0.1);
- REQUIRE(static_cast<int>(results[i].m_x0) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_x0)).epsilon(tolerance));
- REQUIRE(static_cast<int>(results[i].m_y0) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_y0)).epsilon(tolerance));
- REQUIRE(static_cast<int>(results[i].m_w) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_w)).epsilon(tolerance));
- REQUIRE(static_cast<int>(results[i].m_h) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_h)).epsilon(tolerance));
+ REQUIRE(std::abs(results[i].m_normalisedVal -
+ expected_results[imageIdx][i].m_normalisedVal) < 0.1);
+ REQUIRE(static_cast<int>(results[i].m_x0) ==
+ Approx(static_cast<int>((T)expected_results[imageIdx][i].m_x0)).epsilon(tolerance));
+ REQUIRE(static_cast<int>(results[i].m_y0) ==
+ Approx(static_cast<int>((T)expected_results[imageIdx][i].m_y0)).epsilon(tolerance));
+ REQUIRE(static_cast<int>(results[i].m_w) ==
+ Approx(static_cast<int>((T)expected_results[imageIdx][i].m_w)).epsilon(tolerance));
+ REQUIRE(static_cast<int>(results[i].m_h) ==
+ Approx(static_cast<int>((T)expected_results[imageIdx][i].m_h)).epsilon(tolerance));
}
}
-
TEST_CASE("Running inference with TensorFlow Lite Micro and YoloFastest", "[YoloFastest]")
{
SECTION("Executing inferences sequentially")
@@ -142,12 +140,12 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and YoloFastest", "[Yolo
arm::app::object_detection::GetModelLen()));
REQUIRE(model.IsInited());
- for (uint32_t i = 0 ; i < NUMBER_OF_FILES; ++i) {
+ for (uint32_t i = 0; i < NUMBER_OF_FILES; ++i) {
TestInferenceDetectionResults<uint8_t>(i, model, 1);
}
}
- for (uint32_t i = 0 ; i < NUMBER_OF_FILES; ++i) {
+ for (uint32_t i = 0; i < NUMBER_OF_FILES; ++i) {
DYNAMIC_SECTION("Executing inference with re-init")
{
arm::app::YoloFastestModel model{};
diff --git a/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc b/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
index 4d3092e..82bffc6 100644
--- a/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
+++ b/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
*/
#include "ImageUtils.hpp"
+#include "TensorFlowLiteMicro.hpp"
#include "TestData_vww.hpp"
#include "VisualWakeWordModel.hpp"
-#include "TensorFlowLiteMicro.hpp"
#include <catch.hpp>
@@ -27,24 +27,24 @@ bool RunInference(arm::app::Model& model, const int8_t* imageData)
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
- const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ?
- inputTensor->bytes :
- IFM_0_DATA_SIZE;
+ const size_t copySz =
+ inputTensor->bytes < IFM_0_DATA_SIZE ? inputTensor->bytes : IFM_0_DATA_SIZE;
memcpy(inputTensor->data.data, imageData, copySz);
- if(model.IsDataSigned()){
+ if (model.IsDataSigned()) {
arm::app::image::ConvertImgToInt8(inputTensor->data.data, copySz);
}
return model.RunInference();
}
-template<typename T>
-void TestInference(int imageIdx,arm::app::Model& model) {
+template <typename T>
+void TestInference(int imageIdx, arm::app::Model& model)
+{
- auto image = test::get_ifm_data_array(imageIdx);
- auto goldenFV = test::get_ofm_data_array(imageIdx);
+ auto image = test::GetIfmDataArray(imageIdx);
+ auto goldenFV = test::GetOfmDataArray(imageIdx);
REQUIRE(RunInference(model, image));
@@ -56,7 +56,7 @@ void TestInference(int imageIdx,arm::app::Model& model) {
REQUIRE(tensorData);
for (size_t i = 0; i < outputTensor->bytes; i++) {
- auto testVal = static_cast<int>(tensorData[i]);
+ auto testVal = static_cast<int>(tensorData[i]);
auto goldenVal = static_cast<int>(goldenFV[i]);
CHECK(testVal == goldenVal);
}