author    Francis Murtagh <francis.murtagh@arm.com>    2021-10-28 11:11:35 +0100
committer Francis Murtagh <francis.murtagh@arm.com>    2021-10-28 10:36:25 +0000
commit    40d2741b459fb7d55ff034d615678848a8e0af7c (patch)
tree      e0d89fcf02a2a6c2a0a6b8121105c1371cf57193 /tests
parent    040719d09b696dcb979b4dcadfc32181cb84301a (diff)
download  armnn-40d2741b459fb7d55ff034d615678848a8e0af7c.tar.gz
IVGCVSW-6513: Compilation failure in armnn-mobilenet-quant in ML-Examples

* Move TContainer to armnnUtils library

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I3c0f895d11b66f6ee224ac689a19d0477f990b98
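For context, a minimal sketch of what the relocated armnnUtils/TContainer.hpp plausibly declares. The exact variant members are an assumption inferred from the element types the tools in this diff populate (float, int, quantized 8-bit), not a verbatim copy of the header:

    // Sketch of armnnUtils/TContainer.hpp -- assumed contents, not verbatim.
    #pragma once

    #include <mapbox/variant.hpp>   // variant implementation vendored by Arm NN

    #include <cstdint>
    #include <vector>

    namespace armnnUtils
    {

    // One TContainer holds the flattened data of a single tensor, in
    // whichever element type the model requires.
    using TContainer = mapbox::util::variant<std::vector<float>,
                                             std::vector<int>,
                                             std::vector<unsigned char>,
                                             std::vector<int8_t>>;

    } // namespace armnnUtils

Moving the alias out of the armnn namespace means client code must now include armnnUtils/TContainer.hpp and spell the type armnnUtils::TContainer, which is exactly the substitution applied throughout the hunks below.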
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                    | 14
-rw-r--r--  tests/ImageTensorGenerator/ImageTensorGenerator.cpp        |  2
-rw-r--r--  tests/ImageTensorGenerator/ImageTensorGenerator.hpp        |  2
-rw-r--r--  tests/InferenceModel.hpp                                   | 16
-rw-r--r--  tests/InferenceTest.hpp                                    | 11
-rw-r--r--  tests/InferenceTest.inl                                    |  4
-rw-r--r--  tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp  |  8
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp      |  2
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp      |  4
9 files changed, 34 insertions, 29 deletions
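For downstream projects hit by IVGCVSW-6513, such as the armnn-mobilenet-quant sample in ML-Examples, the corresponding fix is mechanical. A hedged before/after sketch follows; the surrounding identifiers are illustrative, not taken from that repository:

    // Before: relied on the alias living in the armnn namespace.
    //   #include <armnn/Utils.hpp>
    //   std::vector<armnn::TContainer> inputDataContainers;

    // After: include the relocated header and qualify with armnnUtils.
    #include <armnnUtils/TContainer.hpp>

    #include <vector>

    std::vector<armnnUtils::TContainer> inputDataContainers;

    void PrepareInput()
    {
        // Assign one of the variant's alternatives, e.g. a float input
        // tensor of 224 * 224 * 3 elements initialised to zero.
        armnnUtils::TContainer input = std::vector<float>(224 * 224 * 3, 0.0f);
        inputDataContainers.push_back(std::move(input));
    }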
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index db15872ad6..dd3c0a32a1 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -9,8 +9,8 @@
#include <AsyncExecutionCallback.hpp>
#include <armnn/Logging.hpp>
-#include <armnn/Utils.hpp>
#include <armnnUtils/Filesystem.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <InferenceTest.hpp>
#if defined(ARMNN_SERIALIZER)
@@ -370,8 +370,8 @@ int MainImpl(const ExecuteNetworkParams& params,
{
using namespace std::chrono;
- std::vector<std::vector<armnn::TContainer>> inputs;
- std::vector<std::vector<armnn::TContainer>> outputs;
+ std::vector<std::vector<armnnUtils::TContainer>> inputs;
+ std::vector<std::vector<armnnUtils::TContainer>> outputs;
try
{
@@ -436,7 +436,7 @@ int MainImpl(const ExecuteNetworkParams& params,
for(unsigned int j = 0; j < params.m_Iterations ; ++j)
{
- std::vector<armnn::TContainer> inputDataContainers;
+ std::vector<armnnUtils::TContainer> inputDataContainers;
for(unsigned int i = 0; i < numInputs; ++i)
{
// If there are less input files given than required for the execution of
@@ -460,7 +460,7 @@ int MainImpl(const ExecuteNetworkParams& params,
numElements = params.m_InputTensorShapes[i]->GetNumElements();
}
- armnn::TContainer tensorData;
+ armnnUtils::TContainer tensorData;
PopulateTensorWithData(tensorData,
numElements,
params.m_InputTypes[i],
@@ -476,7 +476,7 @@ int MainImpl(const ExecuteNetworkParams& params,
for (unsigned int j = 0; j < params.m_Iterations; ++j)
{
- std::vector <armnn::TContainer> outputDataContainers;
+ std::vector <armnnUtils::TContainer> outputDataContainers;
for (unsigned int i = 0; i < numOutputs; ++i)
{
if (params.m_OutputTypes[i].compare("float") == 0)
@@ -596,7 +596,7 @@ int MainImpl(const ExecuteNetworkParams& params,
{
ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool... \n";
armnn::AsyncCallbackManager callbackManager;
- std::unordered_map<armnn::InferenceId, std::vector<armnn::TContainer>&> inferenceOutputMap;
+ std::unordered_map<armnn::InferenceId, std::vector<armnnUtils::TContainer>&> inferenceOutputMap;
// Declare the latest and earliest inference times here to be used when calculating overall time
std::chrono::high_resolution_clock::time_point earliestStartTime;
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index a69a098eb4..0f1cf6d890 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -296,7 +296,7 @@ int main(int argc, char* argv[])
const unsigned int batchSize = 1;
const armnn::DataLayout outputLayout(cmdline.GetLayout());
- std::vector<armnn::TContainer> imageDataContainers;
+ std::vector<armnnUtils::TContainer> imageDataContainers;
const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
try
{
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index c668608f1d..b9579e7636 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -5,9 +5,9 @@
#include "../InferenceTestImage.hpp"
-#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <armnnUtils/Permute.hpp>
#include <algorithm>
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index e2cd5d9868..13f7d74e8d 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -7,13 +7,15 @@
#include <armnn/ArmNN.hpp>
-#include <armnn/Utils.hpp>
#include <armnn/Threadpool.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/TContainer.hpp>
+
#include <common/include/ProfilingGuid.hpp>
#if defined(ARMNN_SERIALIZER)
@@ -584,8 +586,8 @@ public:
}
std::chrono::duration<double, std::milli> Run(
- const std::vector<armnn::TContainer>& inputContainers,
- std::vector<armnn::TContainer>& outputContainers)
+ const std::vector<armnnUtils::TContainer>& inputContainers,
+ std::vector<armnnUtils::TContainer>& outputContainers)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
{
@@ -633,8 +635,8 @@ public:
std::tuple<unsigned int, std::chrono::duration<double, std::milli>> RunAsync(
armnn::experimental::IWorkingMemHandle& workingMemHandleRef,
- const std::vector<armnn::TContainer>& inputContainers,
- std::vector<armnn::TContainer>& outputContainers,
+ const std::vector<armnnUtils::TContainer>& inputContainers,
+ std::vector<armnnUtils::TContainer>& outputContainers,
unsigned int inferenceID)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
@@ -684,8 +686,8 @@ public:
}
}
- void RunAsync(const std::vector<armnn::TContainer>& inputContainers,
- std::vector<armnn::TContainer>& outputContainers,
+ void RunAsync(const std::vector<armnnUtils::TContainer>& inputContainers,
+ std::vector<armnnUtils::TContainer>& outputContainers,
std::shared_ptr<armnn::IAsyncExecutionCallback> cb)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index d0bb0c00f3..fb9c048488 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -7,11 +7,12 @@
#include "InferenceModel.hpp"
#include <armnn/ArmNN.hpp>
-#include <armnn/Utils.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnUtils/TContainer.hpp>
+
#include <cxxopts/cxxopts.hpp>
#include <fmt/format.h>
@@ -114,7 +115,7 @@ public:
InferenceModelTestCase(TModel& model,
unsigned int testCaseId,
- const std::vector<armnn::TContainer>& inputs,
+ const std::vector<armnnUtils::TContainer>& inputs,
const std::vector<unsigned int>& outputSizes)
: m_Model(model)
, m_TestCaseId(testCaseId)
@@ -137,13 +138,13 @@ public:
protected:
unsigned int GetTestCaseId() const { return m_TestCaseId; }
- const std::vector<armnn::TContainer>& GetOutputs() const { return m_Outputs; }
+ const std::vector<armnnUtils::TContainer>& GetOutputs() const { return m_Outputs; }
private:
TModel& m_Model;
unsigned int m_TestCaseId;
- std::vector<armnn::TContainer> m_Inputs;
- std::vector<armnn::TContainer> m_Outputs;
+ std::vector<armnnUtils::TContainer> m_Inputs;
+ std::vector<armnnUtils::TContainer> m_Outputs;
};
template <typename TTestCaseDatabase, typename TModel>
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 91a90f3820..b6087c5e5a 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -7,6 +7,8 @@
#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
+#include <armnnUtils/TContainer.hpp>
+
#include "CxxoptsUtils.hpp"
#include <cxxopts/cxxopts.hpp>
@@ -38,7 +40,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
unsigned int label,
std::vector<typename TModel::DataType> modelInput)
: InferenceModelTestCase<TModel>(
- model, testCaseId, std::vector<armnn::TContainer>{ modelInput }, { model.GetOutputSize() })
+ model, testCaseId, std::vector<armnnUtils::TContainer>{ modelInput }, { model.GetOutputSize() })
, m_Label(label)
, m_QuantizationParams(model.GetQuantizationParams())
, m_NumInferencesRef(numInferencesRef)
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index d1d31f48a1..c08d88e053 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -9,7 +9,7 @@
#include "armnnDeserializer/IDeserializer.hpp"
#include <armnnUtils/Filesystem.hpp>
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <cxxopts/cxxopts.hpp>
#include <map>
@@ -325,8 +325,8 @@ int main(int argc, char* argv[])
const std::string imageName = imageEntry.first;
std::cout << "Processing image: " << imageName << "\n";
- vector<armnn::TContainer> inputDataContainers;
- vector<armnn::TContainer> outputDataContainers;
+ vector<armnnUtils::TContainer> inputDataContainers;
+ vector<armnnUtils::TContainer> outputDataContainers;
auto imagePath = pathToDataDir / fs::path(imageName);
switch (inputTensorDataType)
@@ -370,7 +370,7 @@ int main(int argc, char* argv[])
ARMNN_LOG(fatal) << "armnn::IRuntime: Failed to enqueue workload for image: " << imageName;
}
- checker.AddImageResult<armnn::TContainer>(imageName, outputDataContainers);
+ checker.AddImageResult<armnnUtils::TContainer>(imageName, outputDataContainers);
}
}
else
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 323e9fb812..6c74aaa6ed 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -228,7 +228,7 @@ void TensorPrinter::WriteToFile(const std::vector<T>& values)
}
}
-void PopulateTensorWithData(armnn::TContainer& tensorData,
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
unsigned int numElements,
const std::string& dataTypeStr,
const armnn::Optional<QuantizationParams>& qParams,
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index d9e2459703..bc2868ab35 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/StringUtils.hpp>
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <iostream>
#include <fstream>
@@ -53,7 +53,7 @@ private:
using QuantizationParams = std::pair<float, int32_t>;
-void PopulateTensorWithData(armnn::TContainer& tensorData,
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
unsigned int numElements,
const std::string& dataTypeStr,
const armnn::Optional<QuantizationParams>& qParams,