author    Liam Barry <liam.barry@arm.com>    2022-05-09 17:06:19 +0100
committer Kshitij Sisodia <kshitij.sisodia@arm.com>    2022-05-12 16:10:06 +0000
commit    213a543dd0d07b2f8d51a9c7e2055fd99291c960 (patch)
tree      7357c9961f395131140f8937e732fee68963dc63
parent    dd44d29775e53ad311e1997bb8a7670d0ee6383e (diff)
download  ml-embedded-evaluation-kit-213a543dd0d07b2f8d51a9c7e2055fd99291c960.tar.gz
MLECO-3186: Each use case should use the same namespace convention as KWS and ASR
Certain use cases required additional work because case context variables also became part of a namespace in the generated files. The solution was to declare these extra variables as part of the use-case namespace in the respective model.hpp files. Additional changes to standardise the use of namespaces may be required; a new task is being proposed for that. Minor typo fixes and rewording of customizing.md in the relevant sections are also included.

Signed-off-by: Liam Barry <liam.barry@arm.com>
Change-Id: Ie78f82a30be252cb841136ea5115f21fc8d762cb
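The convention this change standardises on (already followed by the KWS and ASR use cases) is sketched below. The sketch is assembled from the AdModel.hpp and ad/MainLoop.cc hunks in this patch and is illustrative only: use-case specific symbols (the generated model accessors and case-context constants) live in a sub-namespace of arm::app named after the use case, while shared resources such as the tensor arena stay directly in arm::app.

/* Illustrative sketch of the namespace convention, based on the ad use-case
 * hunks below. The accessors and constants are emitted by the code generated
 * via usecase.cmake (note the new NAMESPACE "arm" "app" "ad" argument). */
namespace arm {
namespace app {
    /* Shared across use cases: stays directly in arm::app. */
    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;

    namespace ad {
        /* Provided by the code generated from the .tflite file. */
        extern uint8_t* GetModelPointer();
        extern size_t GetModelLen();

        /* Case-context constants, declared in the same use-case namespace. */
        extern const int g_FrameLength;
        extern const int g_FrameStride;
    } /* namespace ad */
} /* namespace app */
} /* namespace arm */

void main_loop()
{
    arm::app::AdModel model; /* Model wrapper object. */

    /* Callers now reach the generated symbols through the use-case namespace. */
    if (!model.Init(arm::app::tensorArena,
                    sizeof(arm::app::tensorArena),
                    arm::app::ad::GetModelPointer(),
                    arm::app::ad::GetModelLen())) {
        printf_err("Failed to initialise model\n");
        return;
    }
}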
-rw-r--r--  .clang-format | 1
-rw-r--r--  docs/sections/customizing.md | 9
-rw-r--r--  source/application/api/use_case/ad/include/AdModel.hpp | 11
-rw-r--r--  source/application/api/use_case/kws/include/MicroNetKwsModel.hpp | 5
-rw-r--r--  source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp | 9
-rw-r--r--  source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp | 1
-rw-r--r--  source/application/api/use_case/object_detection/include/YoloFastestModel.hpp | 13
-rw-r--r--  source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc | 61
-rw-r--r--  source/hal/source/platform/mps3/CMakeLists.txt | 2
-rw-r--r--  source/use_case/ad/src/MainLoop.cc | 27
-rw-r--r--  source/use_case/ad/usecase.cmake | 2
-rw-r--r--  source/use_case/asr/src/MainLoop.cc | 12
-rw-r--r--  source/use_case/img_class/src/MainLoop.cc | 17
-rw-r--r--  source/use_case/img_class/usecase.cmake | 2
-rw-r--r--  source/use_case/inference_runner/src/MainLoop.cc | 17
-rw-r--r--  source/use_case/inference_runner/usecase.cmake | 2
-rw-r--r--  source/use_case/kws/src/MainLoop.cc | 12
-rw-r--r--  source/use_case/kws_asr/src/MainLoop.cc | 6
-rw-r--r--  source/use_case/noise_reduction/src/MainLoop.cc | 23
-rw-r--r--  source/use_case/noise_reduction/usecase.cmake | 2
-rw-r--r--  source/use_case/object_detection/src/MainLoop.cc | 17
-rw-r--r--  source/use_case/object_detection/src/UseCaseHandler.cc | 13
-rw-r--r--  source/use_case/object_detection/usecase.cmake | 2
-rw-r--r--  source/use_case/vww/src/MainLoop.cc | 17
-rw-r--r--  source/use_case/vww/usecase.cmake | 2
-rw-r--r--  tests/use_case/ad/InferenceTestAD.cc | 25
-rw-r--r--  tests/use_case/img_class/ImgClassificationUCTest.cc | 37
-rw-r--r--  tests/use_case/img_class/InferenceTestMobilenetV2.cc | 19
-rw-r--r--  tests/use_case/kws/InferenceTestMicroNetKws.cc | 3
-rw-r--r--  tests/use_case/kws/KWSHandlerTest.cc | 2
-rw-r--r--  tests/use_case/kws_asr/InferenceTestMicroNetKws.cc | 14
-rw-r--r--  tests/use_case/kws_asr/InferenceTestWav2Letter.cc | 15
-rw-r--r--  tests/use_case/noise_reduction/InferenceTestRNNoise.cc | 29
-rw-r--r--  tests/use_case/noise_reduction/RNNNoiseUCTests.cc | 54
-rw-r--r--  tests/use_case/noise_reduction/RNNoiseModelTests.cc | 29
-rw-r--r--  tests/use_case/object_detection/InferenceTestYoloFastest.cc | 19
-rw-r--r--  tests/use_case/object_detection/ObjectDetectionUCTest.cc | 31
-rw-r--r--  tests/use_case/vww/VisualWakeWordUCTests.cc | 37
38 files changed, 307 insertions, 292 deletions
diff --git a/.clang-format b/.clang-format
index b22d4c2..d53ffa1 100644
--- a/.clang-format
+++ b/.clang-format
@@ -52,5 +52,6 @@ BraceWrapping:
SplitEmptyFunction: false
SplitEmptyRecord: false
SplitEmptyNamespace: true
+NamespaceIndentation: Inner
---
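The added NamespaceIndentation: Inner option tells clang-format to leave the body of an outermost namespace unindented and to indent the bodies of namespaces nested inside it, which is the layout the headers in this patch adopt. A minimal, purely illustrative sketch with generic names (not taken from this repository):

/* With NamespaceIndentation: None (the previous behaviour) neither body is
 * indented; with Inner, only bodies of nested namespaces gain an indent. */
namespace outer {
int sharedValue;      /* body of the outermost namespace: not indented */
namespace inner {
    int nestedValue;  /* body of a nested namespace: indented */
} /* namespace inner */
} /* namespace outer */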
diff --git a/docs/sections/customizing.md b/docs/sections/customizing.md
index f08706b..d97aa9e 100644
--- a/docs/sections/customizing.md
+++ b/docs/sections/customizing.md
@@ -377,7 +377,7 @@ model.
### Define ModelPointer and ModelSize methods
These functions are wrappers around the functions generated in the C++ file containing the neural network model as an
-array. This generation the C++ array from the `.tflite` file, logic needs to be defined in the `usecase.cmake` file for
+array. This logic for generation of the C++ array from the `.tflite` file needs to be defined in the `usecase.cmake` file for
this `HelloWorld` example.
For more details on `usecase.cmake`, refer to: [Building options](./building.md#build-options).
@@ -391,7 +391,7 @@ Model invokes the `ModelPointer()` function which calls the `GetModelPointer()`
data memory address. The `GetModelPointer()` function is generated during the build and can be found in the file
`build/generated/hello_world/src/<model_file_name>.cc`. The file generated is automatically added to the compilation.
-Use the `${use-case}_MODEL_TFLITE_PATH` build parameter to include custom model to the generation, or compilation,
+Use the `${use-case}_MODEL_TFLITE_PATH` build parameter to include custom model in the generation or compilation
process. Please refer to: [Build options](./building.md#build-options) for further information.
## Executing inference
@@ -404,9 +404,8 @@ To run an inference successfully, you must use:
- A main loop function,
- And some input data.
-For the `hello_world` example below, the input array is not populated. However, for real-world scenarios, and before
-compilation and be baked into the application, this data must either be read from an on-board device, or be prepared in
-the form of C++ sources.
+For the `hello_world` example below the input array is not populated. However, for real-world deployment this data must either be read from an on-board device or be prepared in
+the form of C++ sources and baked into the application before compilation.
For example, the image classification application requires extra build steps to generate C++ sources from the provided
images with `generate_images_code` CMake function.
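For context, a sketch of what the ModelPointer and ModelSize wrappers described in this section of customizing.md look like once the generated code is placed in a use-case namespace. The hello_world namespace and HelloWorldModel class follow the example the guide builds and are illustrative here; the exact signatures are dictated by the arm::app::Model base class and may differ slightly.

/* Sketch only: the wrappers simply forward to the accessors that
 * usecase.cmake generates from the .tflite file. The hello_world namespace
 * mirrors a NAMESPACE "arm" "app" "hello_world" argument to generate_tflite_code. */
namespace arm {
namespace app {
    namespace hello_world {
        extern uint8_t* GetModelPointer(); /* generated from the .tflite file */
        extern size_t GetModelLen();       /* generated from the .tflite file */
    } /* namespace hello_world */
} /* namespace app */
} /* namespace arm */

const uint8_t* arm::app::HelloWorldModel::ModelPointer()
{
    return arm::app::hello_world::GetModelPointer();
}

size_t arm::app::HelloWorldModel::ModelSize()
{
    return arm::app::hello_world::GetModelLen();
}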
diff --git a/source/application/api/use_case/ad/include/AdModel.hpp b/source/application/api/use_case/ad/include/AdModel.hpp
index 0436a89..d9f8a08 100644
--- a/source/application/api/use_case/ad/include/AdModel.hpp
+++ b/source/application/api/use_case/ad/include/AdModel.hpp
@@ -19,13 +19,14 @@
#include "Model.hpp"
-extern const int g_FrameLength;
-extern const int g_FrameStride;
-extern const float g_ScoreThreshold;
-extern const float g_TrainingMean;
-
namespace arm {
namespace app {
+ namespace ad {
+ extern const int g_FrameLength;
+ extern const int g_FrameStride;
+ extern const float g_ScoreThreshold;
+ extern const float g_TrainingMean;
+ } /* namespace ad */
class AdModel : public Model {
diff --git a/source/application/api/use_case/kws/include/MicroNetKwsModel.hpp b/source/application/api/use_case/kws/include/MicroNetKwsModel.hpp
index 3d2f3de..e68cd6d 100644
--- a/source/application/api/use_case/kws/include/MicroNetKwsModel.hpp
+++ b/source/application/api/use_case/kws/include/MicroNetKwsModel.hpp
@@ -28,11 +28,6 @@ namespace kws {
extern const uint32_t g_NumMfcc;
extern const uint32_t g_NumAudioWins;
} /* namespace kws */
-} /* namespace app */
-} /* namespace arm */
-
-namespace arm {
-namespace app {
class MicroNetKwsModel : public Model {
public:
diff --git a/source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp b/source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp
index 3d2f23c..0cc0809 100644
--- a/source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp
+++ b/source/application/api/use_case/noise_reduction/include/RNNoiseModel.hpp
@@ -19,12 +19,13 @@
#include "Model.hpp"
-extern const uint32_t g_NumInputFeatures;
-extern const uint32_t g_FrameLength;
-extern const uint32_t g_FrameStride;
-
namespace arm {
namespace app {
+ namespace rnn {
+ extern const uint32_t g_NumInputFeatures;
+ extern const uint32_t g_FrameLength;
+ extern const uint32_t g_FrameStride;
+ } /* namespace rnn */
class RNNoiseModel : public Model {
public:
diff --git a/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp b/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
index 30bc123..6a53688 100644
--- a/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
+++ b/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
@@ -26,7 +26,6 @@
namespace arm {
namespace app {
-
namespace object_detection {
struct Branch {
diff --git a/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp b/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
index 4c64433..16d0715 100644
--- a/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
+++ b/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
@@ -19,13 +19,16 @@
#include "Model.hpp"
-extern const int originalImageSize;
-extern const int channelsImageDisplayed;
-extern const float anchor1[];
-extern const float anchor2[];
-
namespace arm {
namespace app {
+ namespace object_detection {
+ extern const int originalImageSize;
+ extern const int channelsImageDisplayed;
+ /* NOTE: anchors are different for any given input model size, estimated during training
+ * phase */
+ extern const float anchor1[];
+ extern const float anchor2[];
+ } /* namespace object_detection */
class YoloFastestModel : public Model {
diff --git a/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
index fb1606a..7610c4f 100644
--- a/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
+++ b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
@@ -43,45 +43,42 @@ namespace app {
m_topN(topN)
{
/* Init PostProcessing */
- this->m_net =
- object_detection::Network {
- .inputWidth = inputImgCols,
+ this->m_net = object_detection::Network{
+ .inputWidth = inputImgCols,
.inputHeight = inputImgRows,
- .numClasses = numClasses,
- .branches = {
- object_detection::Branch {
- .resolution = inputImgCols/32,
- .numBox = 3,
- .anchor = anchor1,
- .modelOutput = this->m_outputTensor0->data.int8,
- .scale = (static_cast<TfLiteAffineQuantization*>(
- this->m_outputTensor0->quantization.params))->scale->data[0],
- .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
- this->m_outputTensor0->quantization.params))->zero_point->data[0],
- .size = this->m_outputTensor0->bytes
- },
- object_detection::Branch {
- .resolution = inputImgCols/16,
- .numBox = 3,
- .anchor = anchor2,
- .modelOutput = this->m_outputTensor1->data.int8,
- .scale = (static_cast<TfLiteAffineQuantization*>(
- this->m_outputTensor1->quantization.params))->scale->data[0],
- .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
- this->m_outputTensor1->quantization.params))->zero_point->data[0],
- .size = this->m_outputTensor1->bytes
- }
- },
- .topN = m_topN
- };
+ .numClasses = numClasses,
+ .branches =
+ {object_detection::Branch{.resolution = inputImgCols / 32,
+ .numBox = 3,
+ .anchor = arm::app::object_detection::anchor1,
+ .modelOutput = this->m_outputTensor0->data.int8,
+ .scale = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor0->quantization.params))
+ ->scale->data[0],
+ .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor0->quantization.params))
+ ->zero_point->data[0],
+ .size = this->m_outputTensor0->bytes},
+ object_detection::Branch{.resolution = inputImgCols / 16,
+ .numBox = 3,
+ .anchor = arm::app::object_detection::anchor2,
+ .modelOutput = this->m_outputTensor1->data.int8,
+ .scale = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor1->quantization.params))
+ ->scale->data[0],
+ .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor1->quantization.params))
+ ->zero_point->data[0],
+ .size = this->m_outputTensor1->bytes}},
+ .topN = m_topN};
/* End init */
}
bool DetectorPostProcess::DoPostProcess()
{
/* Start postprocessing */
- int originalImageWidth = originalImageSize;
- int originalImageHeight = originalImageSize;
+ int originalImageWidth = arm::app::object_detection::originalImageSize;
+ int originalImageHeight = arm::app::object_detection::originalImageSize;
std::forward_list<image::Detection> detections;
GetNetworkBoxes(this->m_net, originalImageWidth, originalImageHeight, m_threshold, detections);
diff --git a/source/hal/source/platform/mps3/CMakeLists.txt b/source/hal/source/platform/mps3/CMakeLists.txt
index 46da2fa..332837c 100644
--- a/source/hal/source/platform/mps3/CMakeLists.txt
+++ b/source/hal/source/platform/mps3/CMakeLists.txt
@@ -25,7 +25,7 @@ project(${PLATFORM_DRIVERS_TARGET}
DESCRIPTION "Platform drivers library for MPS3 FPGA/FVP targets"
LANGUAGES C CXX ASM)
-# 1. We should be cross-compiling (MPS3 taregt only runs Cortex-M targets)
+# 1. We should be cross-compiling (MPS3 target only runs Cortex-M targets)
if (NOT ${CMAKE_CROSSCOMPILING})
message(FATAL_ERROR "No ${PLATFORM_DRIVERS_TARGET} support for this target.")
endif()
diff --git a/source/use_case/ad/src/MainLoop.cc b/source/use_case/ad/src/MainLoop.cc
index c9d763c..4e5edc1 100644
--- a/source/use_case/ad/src/MainLoop.cc
+++ b/source/use_case/ad/src/MainLoop.cc
@@ -22,14 +22,15 @@
#include "BufAttributes.hpp" /* Buffer attributes to be applied */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ namespace ad {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace ad */
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
enum opcodes
{
MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector */
@@ -53,7 +54,6 @@ static void DisplayMenu()
fflush(stdout);
}
-
void main_loop()
{
arm::app::AdModel model; /* Model wrapper object. */
@@ -61,9 +61,8 @@ void main_loop()
/* Load the model. */
if (!model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()))
- {
+ arm::app::ad::GetModelPointer(),
+ arm::app::ad::GetModelLen())) {
printf_err("failed to initialise model\n");
return;
}
@@ -75,10 +74,10 @@ void main_loop()
caseContext.Set<arm::app::Profiler&>("profiler", profiler);
caseContext.Set<arm::app::Model&>("model", model);
caseContext.Set<uint32_t>("clipIndex", 0);
- caseContext.Set<uint32_t>("frameLength", g_FrameLength);
- caseContext.Set<uint32_t>("frameStride", g_FrameStride);
- caseContext.Set<float>("scoreThreshold", g_ScoreThreshold);
- caseContext.Set<float>("trainingMean", g_TrainingMean);
+ caseContext.Set<uint32_t>("frameLength", arm::app::ad::g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", arm::app::ad::g_FrameStride);
+ caseContext.Set<float>("scoreThreshold", arm::app::ad::g_ScoreThreshold);
+ caseContext.Set<float>("trainingMean", arm::app::ad::g_TrainingMean);
/* Main program loop. */
bool executionSuccessful = true;
diff --git a/source/use_case/ad/usecase.cmake b/source/use_case/ad/usecase.cmake
index 06d7681..d19820d 100644
--- a/source/use_case/ad/usecase.cmake
+++ b/source/use_case/ad/usecase.cmake
@@ -86,4 +86,4 @@ generate_tflite_code(
MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
DESTINATION ${SRC_GEN_DIR}
EXPRESSIONS ${EXTRA_MODEL_CODE}
-)
+ NAMESPACE "arm" "app" "ad")
diff --git a/source/use_case/asr/src/MainLoop.cc b/source/use_case/asr/src/MainLoop.cc
index 354d1f7..290c41c 100644
--- a/source/use_case/asr/src/MainLoop.cc
+++ b/source/use_case/asr/src/MainLoop.cc
@@ -25,11 +25,11 @@
namespace arm {
namespace app {
-namespace asr {
static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- extern uint8_t* GetModelPointer();
- extern size_t GetModelLen();
-} /* namespace asr */
+ namespace asr {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace asr */
} /* namespace app */
} /* namespace arm */
@@ -64,8 +64,8 @@ void main_loop()
arm::app::Wav2LetterModel model; /* Model wrapper object. */
/* Load the model. */
- if (!model.Init(arm::app::asr::tensorArena,
- sizeof(arm::app::asr::tensorArena),
+ if (!model.Init(arm::app::tensorArena,
+ sizeof(arm::app::tensorArena),
arm::app::asr::GetModelPointer(),
arm::app::asr::GetModelLen())) {
printf_err("Failed to initialise model\n");
diff --git a/source/use_case/img_class/src/MainLoop.cc b/source/use_case/img_class/src/MainLoop.cc
index 86ea2ea..a44a401 100644
--- a/source/use_case/img_class/src/MainLoop.cc
+++ b/source/use_case/img_class/src/MainLoop.cc
@@ -24,14 +24,15 @@
#include "BufAttributes.hpp" /* Buffer attributes to be applied */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace img_class {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace img_class */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
using ImgClassClassifier = arm::app::Classifier;
void main_loop()
@@ -41,8 +42,8 @@ void main_loop()
/* Load the model. */
if (!model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen())) {
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen())) {
printf_err("Failed to initialise model\n");
return;
}
diff --git a/source/use_case/img_class/usecase.cmake b/source/use_case/img_class/usecase.cmake
index 2a8be09..e0b6bc8 100644
--- a/source/use_case/img_class/usecase.cmake
+++ b/source/use_case/img_class/usecase.cmake
@@ -63,4 +63,4 @@ USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the eval
generate_tflite_code(
MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
DESTINATION ${SRC_GEN_DIR}
- )
+ NAMESPACE "arm" "app" "img_class")
diff --git a/source/use_case/inference_runner/src/MainLoop.cc b/source/use_case/inference_runner/src/MainLoop.cc
index 28b5c0a..59afa63 100644
--- a/source/use_case/inference_runner/src/MainLoop.cc
+++ b/source/use_case/inference_runner/src/MainLoop.cc
@@ -22,17 +22,15 @@
#include "BufAttributes.hpp" /* Buffer attributes to be applied */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
-} /* namespace arm */
-
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace inference_runner {
#if defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE)
static uint8_t* GetModelPointer()
{
info("Model pointer: 0x%08x\n", DYNAMIC_MODEL_BASE);
- return reinterpret_cast<uint8_t *>(DYNAMIC_MODEL_BASE);
+ return reinterpret_cast<uint8_t*>(DYNAMIC_MODEL_BASE);
}
static size_t GetModelLen()
@@ -49,6 +47,9 @@ extern uint8_t* GetModelPointer();
extern size_t GetModelLen();
#endif /* defined(DYNAMIC_MODEL_BASE) && defined(DYNAMIC_MODEL_SIZE) */
+ } /* namespace inference_runner */
+} /* namespace app */
+} /* namespace arm */
enum opcodes
{
@@ -63,8 +64,8 @@ void main_loop()
/* Load the model. */
if (!model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen())) {
+ arm::app::inference_runner::GetModelPointer(),
+ arm::app::inference_runner::GetModelLen())) {
printf_err("Failed to initialise model\n");
return;
}
diff --git a/source/use_case/inference_runner/usecase.cmake b/source/use_case/inference_runner/usecase.cmake
index c70be71..5a86aa6 100644
--- a/source/use_case/inference_runner/usecase.cmake
+++ b/source/use_case/inference_runner/usecase.cmake
@@ -72,5 +72,5 @@ else()
generate_tflite_code(
MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
DESTINATION ${SRC_GEN_DIR}
- )
+ NAMESPACE "arm" "app" "inference_runner")
endif()
diff --git a/source/use_case/kws/src/MainLoop.cc b/source/use_case/kws/src/MainLoop.cc
index 550e7a1..e0518f2 100644
--- a/source/use_case/kws/src/MainLoop.cc
+++ b/source/use_case/kws/src/MainLoop.cc
@@ -26,11 +26,11 @@
namespace arm {
namespace app {
-namespace kws {
static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- extern uint8_t *GetModelPointer();
- extern size_t GetModelLen();
-} /* namespace kws */
+ namespace kws {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace kws */
} /* namespace app */
} /* namespace arm */
@@ -64,8 +64,8 @@ void main_loop()
arm::app::MicroNetKwsModel model; /* Model wrapper object. */
/* Load the model. */
- if (!model.Init(arm::app::kws::tensorArena,
- sizeof(arm::app::kws::tensorArena),
+ if (!model.Init(arm::app::tensorArena,
+ sizeof(arm::app::tensorArena),
arm::app::kws::GetModelPointer(),
arm::app::kws::GetModelLen())) {
printf_err("Failed to initialise model\n");
diff --git a/source/use_case/kws_asr/src/MainLoop.cc b/source/use_case/kws_asr/src/MainLoop.cc
index bba4480..0638ecd 100644
--- a/source/use_case/kws_asr/src/MainLoop.cc
+++ b/source/use_case/kws_asr/src/MainLoop.cc
@@ -28,17 +28,17 @@
namespace arm {
namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
namespace asr {
extern uint8_t* GetModelPointer();
extern size_t GetModelLen();
- }
+ } /* namespace asr */
namespace kws {
extern uint8_t* GetModelPointer();
extern size_t GetModelLen();
- }
+ } /* namespace kws */
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
} /* namespace app */
} /* namespace arm */
diff --git a/source/use_case/noise_reduction/src/MainLoop.cc b/source/use_case/noise_reduction/src/MainLoop.cc
index 257f5cf..bc277da 100644
--- a/source/use_case/noise_reduction/src/MainLoop.cc
+++ b/source/use_case/noise_reduction/src/MainLoop.cc
@@ -22,14 +22,15 @@
#include "BufAttributes.hpp" /* Buffer attributes to be applied */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace rnn {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace rnn */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
enum opcodes
{
MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
@@ -74,8 +75,8 @@ void main_loop()
/* Load the model. */
if (!model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen())) {
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen())) {
printf_err("Failed to initialise model\n");
return;
}
@@ -85,9 +86,9 @@ void main_loop()
arm::app::Profiler profiler{"noise_reduction"};
caseContext.Set<arm::app::Profiler&>("profiler", profiler);
- caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
- caseContext.Set<uint32_t>("frameLength", g_FrameLength);
- caseContext.Set<uint32_t>("frameStride", g_FrameStride);
+ caseContext.Set<uint32_t>("numInputFeatures", arm::app::rnn::g_NumInputFeatures);
+ caseContext.Set<uint32_t>("frameLength", arm::app::rnn::g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", arm::app::rnn::g_FrameStride);
caseContext.Set<arm::app::RNNoiseModel&>("model", model);
SetAppCtxClipIdx(caseContext, 0);
diff --git a/source/use_case/noise_reduction/usecase.cmake b/source/use_case/noise_reduction/usecase.cmake
index 0cd0761..199f8e1 100644
--- a/source/use_case/noise_reduction/usecase.cmake
+++ b/source/use_case/noise_reduction/usecase.cmake
@@ -81,7 +81,7 @@ generate_tflite_code(
MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
DESTINATION ${SRC_GEN_DIR}
EXPRESSIONS ${EXTRA_MODEL_CODE}
-)
+ NAMESPACE "arm" "app" "rnn")
# For MPS3, allow dumping of output data to memory, based on these parameters:
diff --git a/source/use_case/object_detection/src/MainLoop.cc b/source/use_case/object_detection/src/MainLoop.cc
index 4d70d2d..dc9b693 100644
--- a/source/use_case/object_detection/src/MainLoop.cc
+++ b/source/use_case/object_detection/src/MainLoop.cc
@@ -23,14 +23,15 @@
#include "BufAttributes.hpp" /* Buffer attributes to be applied */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace object_detection {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace object_detection */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
static void DisplayDetectionMenu()
{
printf("\n\n");
@@ -52,8 +53,8 @@ void main_loop()
/* Load the model. */
if (!model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen())) {
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen())) {
printf_err("Failed to initialise model\n");
return;
}
diff --git a/source/use_case/object_detection/src/UseCaseHandler.cc b/source/use_case/object_detection/src/UseCaseHandler.cc
index 4d0877a..e9bcd4a 100644
--- a/source/use_case/object_detection/src/UseCaseHandler.cc
+++ b/source/use_case/object_detection/src/UseCaseHandler.cc
@@ -27,6 +27,9 @@
namespace arm {
namespace app {
+ namespace object_detection {
+ extern const int channelsImageDisplayed;
+ } /* namespace object_detection */
/**
* @brief Presents inference results along using the data presentation
@@ -122,9 +125,13 @@ namespace app {
/* Display image on the LCD. */
hal_lcd_display_image(
- (channelsImageDisplayed == 3) ? currImage : dstPtr,
- inputImgCols, inputImgRows, channelsImageDisplayed,
- dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+ (arm::app::object_detection::channelsImageDisplayed == 3) ? currImage : dstPtr,
+ inputImgCols,
+ inputImgRows,
+ arm::app::object_detection::channelsImageDisplayed,
+ dataPsnImgStartX,
+ dataPsnImgStartY,
+ dataPsnImgDownscaleFactor);
/* Display message on the LCD - inference running. */
hal_lcd_display_text(str_inf.c_str(), str_inf.size(),
diff --git a/source/use_case/object_detection/usecase.cmake b/source/use_case/object_detection/usecase.cmake
index 850e7fc..b0a07d5 100644
--- a/source/use_case/object_detection/usecase.cmake
+++ b/source/use_case/object_detection/usecase.cmake
@@ -76,4 +76,4 @@ generate_tflite_code(
MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
DESTINATION ${SRC_GEN_DIR}
EXPRESSIONS ${EXTRA_MODEL_CODE}
- )
+ NAMESPACE "arm" "app" "object_detection")
diff --git a/source/use_case/vww/src/MainLoop.cc b/source/use_case/vww/src/MainLoop.cc
index fae7530..4fb5df7 100644
--- a/source/use_case/vww/src/MainLoop.cc
+++ b/source/use_case/vww/src/MainLoop.cc
@@ -25,14 +25,15 @@
#include "BufAttributes.hpp" /* Buffer attributes to be applied */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace vww {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace vww */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
using ViusalWakeWordClassifier = arm::app::Classifier;
void main_loop()
@@ -42,8 +43,8 @@ void main_loop()
/* Load the model. */
if (!model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen())) {
+ arm::app::vww::GetModelPointer(),
+ arm::app::vww::GetModelLen())) {
printf_err("Failed to initialise model\n");
return;
}
diff --git a/source/use_case/vww/usecase.cmake b/source/use_case/vww/usecase.cmake
index f6a3efe..7ef4596 100644
--- a/source/use_case/vww/usecase.cmake
+++ b/source/use_case/vww/usecase.cmake
@@ -48,7 +48,7 @@ USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the eval
generate_tflite_code(
MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
DESTINATION ${SRC_GEN_DIR}
-)
+ NAMESPACE "arm" "app" "vww")
# Generate labels file
set(${use_case}_LABELS_CPP_FILE Labels)
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
index d837617..d033407 100644
--- a/tests/use_case/ad/InferenceTestAD.cc
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -29,14 +29,15 @@
#endif /* AD_FEATURE_VEC_DATA_SIZE */
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace ad {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace ad */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
using namespace test;
bool RunInference(arm::app::Model& model, const int8_t vec[])
@@ -95,9 +96,9 @@ TEST_CASE("Running random inference with TensorFlow Lite Micro and AdModel Int8"
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::ad::GetModelPointer(),
+ arm::app::ad::GetModelLen()));
REQUIRE(model.IsInited());
REQUIRE(RunInferenceRandom(model));
@@ -116,9 +117,9 @@ TEST_CASE("Running golden vector inference with TensorFlow Lite Micro and AdMode
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::ad::GetModelPointer(),
+ arm::app::ad::GetModelLen()));
REQUIRE(model.IsInited());
TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
diff --git a/tests/use_case/img_class/ImgClassificationUCTest.cc b/tests/use_case/img_class/ImgClassificationUCTest.cc
index d8339b6..1685e5f 100644
--- a/tests/use_case/img_class/ImgClassificationUCTest.cc
+++ b/tests/use_case/img_class/ImgClassificationUCTest.cc
@@ -25,14 +25,15 @@
#include <catch.hpp>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace img_class {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace img_class */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
TEST_CASE("Model info")
{
/* Model wrapper object. */
@@ -40,9 +41,9 @@ TEST_CASE("Model info")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
@@ -63,9 +64,9 @@ TEST_CASE("Inference by index", "[.]")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
@@ -99,9 +100,9 @@ TEST_CASE("Inference run all images", "[.]")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
@@ -131,9 +132,9 @@ TEST_CASE("List all images")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index 30ce19f..6cc1190 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -25,12 +25,13 @@
namespace arm {
namespace app {
static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace img_class {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace img_class */
} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
using namespace test;
bool RunInference(arm::app::Model& model, const int8_t imageData[])
@@ -78,9 +79,9 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen()));
REQUIRE(model.IsInited());
for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) {
@@ -95,9 +96,9 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::img_class::GetModelPointer(),
+ arm::app::img_class::GetModelLen()));
REQUIRE(model.IsInited());
TestInference<uint8_t>(i, model, 1);
diff --git a/tests/use_case/kws/InferenceTestMicroNetKws.cc b/tests/use_case/kws/InferenceTestMicroNetKws.cc
index a6f7a03..3dc75e3 100644
--- a/tests/use_case/kws/InferenceTestMicroNetKws.cc
+++ b/tests/use_case/kws/InferenceTestMicroNetKws.cc
@@ -25,9 +25,8 @@
namespace arm {
namespace app {
static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
-
namespace kws {
- extern uint8_t *GetModelPointer();
+ extern uint8_t* GetModelPointer();
extern size_t GetModelLen();
} /* namespace kws */
} /* namespace app */
diff --git a/tests/use_case/kws/KWSHandlerTest.cc b/tests/use_case/kws/KWSHandlerTest.cc
index d9d00a8..db67e54 100644
--- a/tests/use_case/kws/KWSHandlerTest.cc
+++ b/tests/use_case/kws/KWSHandlerTest.cc
@@ -30,7 +30,7 @@ namespace arm {
namespace kws {
extern uint8_t* GetModelPointer();
extern size_t GetModelLen();
- }
+ } /* namespace kws */
} /* namespace app */
} /* namespace arm */
diff --git a/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc b/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
index 4ba4693..76c7e90 100644
--- a/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
+++ b/tests/use_case/kws_asr/InferenceTestMicroNetKws.cc
@@ -23,13 +23,13 @@
#include <random>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- namespace kws {
- extern uint8_t* GetModelPointer();
- extern size_t GetModelLen();
- }
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace kws {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace kws */
+} /* namespace app */
} /* namespace arm */
namespace test {
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 5d30211..6089e91 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -23,14 +23,13 @@
#include <random>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
-
- namespace asr {
- extern uint8_t* GetModelPointer();
- extern size_t GetModelLen();
- }
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace asr {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace asr */
+} /* namespace app */
} /* namespace arm */
namespace test {
diff --git a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
index 9dc640b..fdc59c1 100644
--- a/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
+++ b/tests/use_case/noise_reduction/InferenceTestRNNoise.cc
@@ -23,16 +23,17 @@
#include <random>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace rnn {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace rnn */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
namespace test {
-namespace rnnoise {
+namespace noise_reduction {
bool RunInference(arm::app::Model& model, const std::vector<std::vector<int8_t>> inData)
{
@@ -73,9 +74,9 @@ namespace rnnoise {
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen()));
REQUIRE(model.IsInited());
REQUIRE(RunInferenceRandom(model));
@@ -135,14 +136,14 @@ namespace rnnoise {
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen()));
REQUIRE(model.IsInited());
TestInference<int8_t>(goldenInputFV, goldenOutputFV, model);
}
}
-} /* namespace rnnoise */
+} /* namespace noise_reduction */
} /* namespace test */
diff --git a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
index bebfdfd..310814f 100644
--- a/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
+++ b/tests/use_case/noise_reduction/RNNNoiseUCTests.cc
@@ -25,14 +25,15 @@
#include <Profiler.hpp>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace rnn {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace rnn */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
#define PLATFORM hal_platform_init();
#define CONTEXT \
@@ -48,9 +49,9 @@ TEST_CASE("Verify output tensor memory dump")
arm::app::RNNoiseModel model{};
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen()));
REQUIRE(model.IsInited());
/* Populate the output tensors */
@@ -112,15 +113,15 @@ TEST_CASE("Inference run all clips", "[RNNoise]")
CONTEXT
caseContext.Set<uint32_t>("clipIndex", 0);
- caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
- caseContext.Set<uint32_t>("frameLength", g_FrameLength);
- caseContext.Set<uint32_t>("frameStride", g_FrameStride);
+ caseContext.Set<uint32_t>("numInputFeatures", arm::app::rnn::g_NumInputFeatures);
+ caseContext.Set<uint32_t>("frameLength", arm::app::rnn::g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", arm::app::rnn::g_FrameStride);
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen()));
REQUIRE(arm::app::NoiseReductionHandler(caseContext, true));
}
@@ -147,22 +148,22 @@ void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
caseContext.Set<std::function<const int16_t*(const uint32_t)>>("features", get_audio_array);
caseContext.Set<std::function<const char* (const uint32_t)>>("featureFileNames", get_test_filename);
- caseContext.Set<uint32_t>("frameLength", g_FrameLength);
- caseContext.Set<uint32_t>("frameStride", g_FrameStride);
- caseContext.Set<uint32_t>("numInputFeatures", g_NumInputFeatures);
+ caseContext.Set<uint32_t>("frameLength", arm::app::rnn::g_FrameLength);
+ caseContext.Set<uint32_t>("frameStride", arm::app::rnn::g_FrameStride);
+ caseContext.Set<uint32_t>("numInputFeatures", arm::app::rnn::g_NumInputFeatures);
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen()));
- size_t oneInferenceOutSizeBytes = g_FrameLength * sizeof(int16_t);
+ size_t oneInferenceOutSizeBytes = arm::app::rnn::g_FrameLength * sizeof(int16_t);
auto infIndex = 0;
for (auto numInf: numberOfInferences) {
DYNAMIC_SECTION("Number of features: "<< numInf) {
caseContext.Set<uint32_t>("clipIndex", 1); /* Only getting p232_208.wav for tests. */
- uint32_t audioSizeInput = numInf*g_FrameLength;
+ uint32_t audioSizeInput = numInf * arm::app::rnn::g_FrameLength;
caseContext.Set<std::function<uint32_t(const uint32_t)>>("featureSizes",
get_golden_input_p232_208_array_size(audioSizeInput));
@@ -180,12 +181,13 @@ void testInfByIndex(std::vector<uint32_t>& numberOfInferences) {
REQUIRE(arm::app::NoiseReductionHandler(caseContext, false));
/* The expected output after post-processing. */
- std::vector<int16_t> golden(&ofms[infIndex][0], &ofms[infIndex][0] + g_FrameLength);
+ std::vector<int16_t> golden(&ofms[infIndex][0],
+ &ofms[infIndex][0] + arm::app::rnn::g_FrameLength);
size_t startOfLastInfOut = undefMemDumpBytesWritten - oneInferenceOutSizeBytes;
/* The actual result from the usecase handler. */
- std::vector<int16_t> runtime(g_FrameLength);
+ std::vector<int16_t> runtime(arm::app::rnn::g_FrameLength);
std::memcpy(runtime.data(), &memDump[startOfLastInfOut], oneInferenceOutSizeBytes);
/* Margin of 43 is 0.07% error. */
@@ -211,7 +213,7 @@ TEST_CASE("Inference by index - several inferences", "[RNNoise]")
REQUIRE(64757 == totalAudioSize); /* Checking that the input file is as expected and has not changed. */
/* 3 different inference amounts: 1, 2 and all inferences required to cover total feature set */
- uint32_t totalInferences = totalAudioSize / g_FrameLength;
+ uint32_t totalInferences = totalAudioSize / arm::app::rnn::g_FrameLength;
std::vector<uint32_t> numberOfInferences = {1, 2, totalInferences};
testInfByIndex(numberOfInferences);
}
diff --git a/tests/use_case/noise_reduction/RNNoiseModelTests.cc b/tests/use_case/noise_reduction/RNNoiseModelTests.cc
index 9720ba5..7bd83b1 100644
--- a/tests/use_case/noise_reduction/RNNoiseModelTests.cc
+++ b/tests/use_case/noise_reduction/RNNoiseModelTests.cc
@@ -23,14 +23,15 @@
#include <random>
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace rnn {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace rnn */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
bool RunInference(arm::app::Model& model, std::vector<int8_t> vec,
const size_t sizeRequired, const size_t dataInputIndex)
{
@@ -73,8 +74,8 @@ TEST_CASE("Running random inference with TensorFlow Lite Micro and RNNoiseModel
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen()));
REQUIRE(model.IsInited());
model.ResetGruState();
@@ -128,9 +129,9 @@ TEST_CASE("Test initial GRU out state is 0", "[RNNoise]")
{
TestRNNoiseModel model{};
model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen());
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen());
auto map = model.GetStateMap();
@@ -152,9 +153,9 @@ TEST_CASE("Test GRU state copy", "[RNNoise]")
{
TestRNNoiseModel model{};
model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen());
+ sizeof(arm::app::tensorArena),
+ arm::app::rnn::GetModelPointer(),
+ arm::app::rnn::GetModelLen());
REQUIRE(RunInferenceRandom(model, 0));
auto map = model.GetStateMap();
diff --git a/tests/use_case/object_detection/InferenceTestYoloFastest.cc b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
index 1b4d1dd..eb92904 100644
--- a/tests/use_case/object_detection/InferenceTestYoloFastest.cc
+++ b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
@@ -25,12 +25,13 @@
namespace arm {
namespace app {
static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace object_detection {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace object_detection */
} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
#include <catch.hpp>
void GetExpectedResults(std::vector<std::vector<arm::app::object_detection::DetectionResult>> &expected_results)
@@ -132,9 +133,9 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and YoloFastest", "[Yolo
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen()));
REQUIRE(model.IsInited());
for (uint32_t i = 0 ; i < NUMBER_OF_FILES; ++i) {
@@ -149,9 +150,9 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and YoloFastest", "[Yolo
REQUIRE_FALSE(model.IsInited());
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen()));
REQUIRE(model.IsInited());
TestInferenceDetectionResults<uint8_t>(i, model, 1);
diff --git a/tests/use_case/object_detection/ObjectDetectionUCTest.cc b/tests/use_case/object_detection/ObjectDetectionUCTest.cc
index ffb4976..c21a416 100644
--- a/tests/use_case/object_detection/ObjectDetectionUCTest.cc
+++ b/tests/use_case/object_detection/ObjectDetectionUCTest.cc
@@ -27,12 +27,13 @@
namespace arm {
namespace app {
static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace object_detection {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace object_detection */
} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
TEST_CASE("Model info")
{
/* Model wrapper object. */
@@ -40,9 +41,9 @@ TEST_CASE("Model info")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
@@ -63,9 +64,9 @@ TEST_CASE("Inference by index")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
@@ -89,9 +90,9 @@ TEST_CASE("Inference run all images")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
@@ -115,9 +116,9 @@ TEST_CASE("List all images")
/* Load the model. */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::object_detection::GetModelPointer(),
+ arm::app::object_detection::GetModelLen()));
/* Instantiate application context. */
arm::app::ApplicationContext caseContext;
diff --git a/tests/use_case/vww/VisualWakeWordUCTests.cc b/tests/use_case/vww/VisualWakeWordUCTests.cc
index 05a31a4..fe3782b 100644
--- a/tests/use_case/vww/VisualWakeWordUCTests.cc
+++ b/tests/use_case/vww/VisualWakeWordUCTests.cc
@@ -25,23 +25,24 @@
#include "UseCaseCommonUtils.hpp"
namespace arm {
- namespace app {
- static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
- } /* namespace app */
+namespace app {
+ static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
+ namespace vww {
+ extern uint8_t* GetModelPointer();
+ extern size_t GetModelLen();
+ } /* namespace vww */
+} /* namespace app */
} /* namespace arm */
-extern uint8_t* GetModelPointer();
-extern size_t GetModelLen();
-
TEST_CASE("Model info")
{
arm::app::VisualWakeWordModel model; /* model wrapper object */
/* Load the model */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::vww::GetModelPointer(),
+ arm::app::vww::GetModelLen()));
/* Instantiate application context */
arm::app::ApplicationContext caseContext;
@@ -59,9 +60,9 @@ TEST_CASE("Inference by index")
/* Load the model */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::vww::GetModelPointer(),
+ arm::app::vww::GetModelLen()));
/* Instantiate application context */
arm::app::ApplicationContext caseContext;
@@ -92,9 +93,9 @@ TEST_CASE("Inference run all images")
/* Load the model */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::vww::GetModelPointer(),
+ arm::app::vww::GetModelLen()));
/* Instantiate application context */
arm::app::ApplicationContext caseContext;
@@ -121,9 +122,9 @@ TEST_CASE("List all images")
/* Load the model */
REQUIRE(model.Init(arm::app::tensorArena,
- sizeof(arm::app::tensorArena),
- GetModelPointer(),
- GetModelLen()));
+ sizeof(arm::app::tensorArena),
+ arm::app::vww::GetModelPointer(),
+ arm::app::vww::GetModelLen()));
/* Instantiate application context */
arm::app::ApplicationContext caseContext;