author    David Monahan <David.Monahan@arm.com>  2021-10-22 12:57:28 +0100
committer David Monahan <david.monahan@arm.com>  2021-10-22 13:53:18 +0000
commit    6bb47a720ad33d0a330228b52e320a1022e4dfe2 (patch)
tree      606434ca03951a79c828fb6371deb4e62fdcd858
parent    81ec994a3ebc8ad02c4a622846cf64b70e1182bd (diff)
download  armnn-6bb47a720ad33d0a330228b52e320a1022e4dfe2.tar.gz
IVGCVSW-6359 Create a single definition of TContainer
* Added a single definition of TContainer to include/armnn/Utils.hpp
* Changed all files which contained their own identical definitions of TContainer to use the new one

Signed-off-by: David Monahan <David.Monahan@arm.com>
Change-Id: I63e633693a430bbbd6a29001cafa19742ef8309a
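For context, a minimal sketch of how calling code might use the shared alias after this change; the function name and tensor values below are purely illustrative and not part of the commit:

    #include <armnn/Utils.hpp>
    #include <vector>

    void PrepareBuffers()
    {
        // One variant per network input; the std::vector<float> alternative is chosen here.
        std::vector<armnn::TContainer> inputs;
        inputs.emplace_back(std::vector<float>{0.1f, 0.2f, 0.7f});

        // Outputs are pre-sized with the alternative matching the output data type.
        std::vector<armnn::TContainer> outputs;
        outputs.emplace_back(std::vector<float>(3));
    }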
-rw-r--r--  include/armnn/Utils.hpp                                     7
-rw-r--r--  src/armnn/test/ModelAccuracyCheckerTest.cpp                21
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                    13
-rw-r--r--  tests/ImageTensorGenerator/ImageTensorGenerator.cpp         5
-rw-r--r--  tests/ImageTensorGenerator/ImageTensorGenerator.hpp         1
-rw-r--r--  tests/InferenceModel.hpp                                   17
-rw-r--r--  tests/InferenceTest.hpp                                    15
-rw-r--r--  tests/InferenceTest.inl                                     6
-rw-r--r--  tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp   9
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp       6
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp       7
11 files changed, 52 insertions, 55 deletions
diff --git a/include/armnn/Utils.hpp b/include/armnn/Utils.hpp
index b090651813..533117c5d9 100644
--- a/include/armnn/Utils.hpp
+++ b/include/armnn/Utils.hpp
@@ -5,6 +5,8 @@
#pragma once
#include "armnn/TypesUtils.hpp"
+
+#include <mapbox/variant.hpp>
#include <iostream>
namespace armnn
@@ -40,4 +42,9 @@ bool NeonDetected();
const std::string GetVersion();
+// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
+// defining your own.
+using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
+
} // namespace armnn
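Since armnn::TContainer is a mapbox variant, consuming code typically checks which alternative is held before reading the data. A minimal sketch, assuming the variant's is<T>()/get<T>() accessors; the helper function below is illustrative and not part of this commit:

    #include <armnn/Utils.hpp>
    #include <iostream>
    #include <vector>

    // Illustrative helper: print the first element of a float output tensor.
    void PrintFirstElement(const armnn::TContainer& output)
    {
        if (output.is<std::vector<float>>())
        {
            const std::vector<float>& values = output.get<std::vector<float>>();
            if (!values.empty())
            {
                std::cout << "first value: " << values[0] << std::endl;
            }
        }
    }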
diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp
index 47f112ee72..47ae3f4b0a 100644
--- a/src/armnn/test/ModelAccuracyCheckerTest.cpp
+++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp
@@ -3,6 +3,7 @@
// SPDX-License-Identifier: MIT
//
#include "ModelAccuracyChecker.hpp"
+#include <armnn/Utils.hpp>
#include <doctest/doctest.h>
@@ -52,8 +53,6 @@ struct TestHelper
TEST_SUITE("ModelAccuracyCheckerTest")
{
-using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
{
@@ -61,12 +60,12 @@ TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
// Add image 1 and check accuracy
std::vector<float> inferenceOutputVector1 = {0.05f, 0.10f, 0.70f, 0.15f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
- TContainer inference1Container(inferenceOutputVector1);
- std::vector<TContainer> outputTensor1;
+ armnn::TContainer inference1Container(inferenceOutputVector1);
+ std::vector<armnn::TContainer> outputTensor1;
outputTensor1.push_back(inference1Container);
std::string imageName = "val_01.JPEG";
- checker.AddImageResult<TContainer>(imageName, outputTensor1);
+ checker.AddImageResult<armnn::TContainer>(imageName, outputTensor1);
// Top 1 Accuracy
float totalAccuracy = checker.GetAccuracy(1);
@@ -74,12 +73,12 @@ TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
// Add image 2 and check accuracy
std::vector<float> inferenceOutputVector2 = {0.10f, 0.0f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
- TContainer inference2Container(inferenceOutputVector2);
- std::vector<TContainer> outputTensor2;
+ armnn::TContainer inference2Container(inferenceOutputVector2);
+ std::vector<armnn::TContainer> outputTensor2;
outputTensor2.push_back(inference2Container);
imageName = "val_02.JPEG";
- checker.AddImageResult<TContainer>(imageName, outputTensor2);
+ checker.AddImageResult<armnn::TContainer>(imageName, outputTensor2);
// Top 1 Accuracy
totalAccuracy = checker.GetAccuracy(1);
@@ -91,12 +90,12 @@ TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
// Add image 3 and check accuracy
std::vector<float> inferenceOutputVector3 = {0.0f, 0.10f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
- TContainer inference3Container(inferenceOutputVector3);
- std::vector<TContainer> outputTensor3;
+ armnn::TContainer inference3Container(inferenceOutputVector3);
+ std::vector<armnn::TContainer> outputTensor3;
outputTensor3.push_back(inference3Container);
imageName = "val_03.JPEG";
- checker.AddImageResult<TContainer>(imageName, outputTensor3);
+ checker.AddImageResult<armnn::TContainer>(imageName, outputTensor3);
// Top 1 Accuracy
totalAccuracy = checker.GetAccuracy(1);
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index fa8c8c8761..66be8fd02a 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -9,6 +9,7 @@
#include <AsyncExecutionCallback.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/Utils.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <InferenceTest.hpp>
@@ -370,8 +371,8 @@ int MainImpl(const ExecuteNetworkParams& params,
{
using namespace std::chrono;
- std::vector<std::vector<TContainer>> inputs;
- std::vector<std::vector<TContainer>> outputs;
+ std::vector<std::vector<armnn::TContainer>> inputs;
+ std::vector<std::vector<armnn::TContainer>> outputs;
try
{
@@ -436,7 +437,7 @@ int MainImpl(const ExecuteNetworkParams& params,
for(unsigned int j = 0; j < params.m_Iterations ; ++j)
{
- std::vector<TContainer> inputDataContainers;
+ std::vector<armnn::TContainer> inputDataContainers;
for(unsigned int i = 0; i < numInputs; ++i)
{
// If there are less input files given than required for the execution of
@@ -460,7 +461,7 @@ int MainImpl(const ExecuteNetworkParams& params,
numElements = params.m_InputTensorShapes[i]->GetNumElements();
}
- TContainer tensorData;
+ armnn::TContainer tensorData;
PopulateTensorWithData(tensorData,
numElements,
params.m_InputTypes[i],
@@ -476,7 +477,7 @@ int MainImpl(const ExecuteNetworkParams& params,
for (unsigned int j = 0; j < params.m_Iterations; ++j)
{
- std::vector <TContainer> outputDataContainers;
+ std::vector <armnn::TContainer> outputDataContainers;
for (unsigned int i = 0; i < numOutputs; ++i)
{
if (params.m_OutputTypes[i].compare("float") == 0)
@@ -596,7 +597,7 @@ int MainImpl(const ExecuteNetworkParams& params,
{
ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool... \n";
armnn::AsyncCallbackManager callbackManager;
- std::unordered_map<armnn::InferenceId, std::vector<TContainer>&> inferenceOutputMap;
+ std::unordered_map<armnn::InferenceId, std::vector<armnn::TContainer>&> inferenceOutputMap;
// Declare the latest and earliest inference times here to be used when calculating overall time
std::chrono::high_resolution_clock::time_point earliestStartTime;
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index b4432558c4..a69a098eb4 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -9,7 +9,6 @@
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/Filesystem.hpp>
-#include <mapbox/variant.hpp>
#include <cxxopts/cxxopts.hpp>
#include <algorithm>
@@ -297,9 +296,7 @@ int main(int argc, char* argv[])
const unsigned int batchSize = 1;
const armnn::DataLayout outputLayout(cmdline.GetLayout());
- using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>,
- std::vector<int8_t>>;
- std::vector<TContainer> imageDataContainers;
+ std::vector<armnn::TContainer> imageDataContainers;
const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
try
{
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index 6d2e549360..c668608f1d 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -5,6 +5,7 @@
#include "../InferenceTestImage.hpp"
+#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/Permute.hpp>
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index cf3aae137e..e2cd5d9868 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -5,7 +5,9 @@
#pragma once
+
#include <armnn/ArmNN.hpp>
+#include <armnn/Utils.hpp>
#include <armnn/Threadpool.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>
@@ -371,8 +373,7 @@ public:
using DataType = TDataType;
using Params = InferenceModelInternal::Params;
using QuantizationParams = InferenceModelInternal::QuantizationParams;
- using TContainer
- = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
+
struct CommandLineOptions
{
@@ -583,8 +584,8 @@ public:
}
std::chrono::duration<double, std::milli> Run(
- const std::vector<TContainer>& inputContainers,
- std::vector<TContainer>& outputContainers)
+ const std::vector<armnn::TContainer>& inputContainers,
+ std::vector<armnn::TContainer>& outputContainers)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
{
@@ -632,8 +633,8 @@ public:
std::tuple<unsigned int, std::chrono::duration<double, std::milli>> RunAsync(
armnn::experimental::IWorkingMemHandle& workingMemHandleRef,
- const std::vector<TContainer>& inputContainers,
- std::vector<TContainer>& outputContainers,
+ const std::vector<armnn::TContainer>& inputContainers,
+ std::vector<armnn::TContainer>& outputContainers,
unsigned int inferenceID)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
@@ -683,8 +684,8 @@ public:
}
}
- void RunAsync(const std::vector<TContainer>& inputContainers,
- std::vector<TContainer>& outputContainers,
+ void RunAsync(const std::vector<armnn::TContainer>& inputContainers,
+ std::vector<armnn::TContainer>& outputContainers,
std::shared_ptr<armnn::IAsyncExecutionCallback> cb)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 0cc6c3bdca..d0bb0c00f3 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -7,6 +7,7 @@
#include "InferenceModel.hpp"
#include <armnn/ArmNN.hpp>
+#include <armnn/Utils.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
@@ -110,12 +111,10 @@ template <typename TModel>
class InferenceModelTestCase : public IInferenceTestCase
{
public:
- using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
InferenceModelTestCase(TModel& model,
unsigned int testCaseId,
- const std::vector<TContainer>& inputs,
+ const std::vector<armnn::TContainer>& inputs,
const std::vector<unsigned int>& outputSizes)
: m_Model(model)
, m_TestCaseId(testCaseId)
@@ -138,13 +137,13 @@ public:
protected:
unsigned int GetTestCaseId() const { return m_TestCaseId; }
- const std::vector<TContainer>& GetOutputs() const { return m_Outputs; }
+ const std::vector<armnn::TContainer>& GetOutputs() const { return m_Outputs; }
private:
- TModel& m_Model;
- unsigned int m_TestCaseId;
- std::vector<TContainer> m_Inputs;
- std::vector<TContainer> m_Outputs;
+ TModel& m_Model;
+ unsigned int m_TestCaseId;
+ std::vector<armnn::TContainer> m_Inputs;
+ std::vector<armnn::TContainer> m_Outputs;
};
template <typename TTestCaseDatabase, typename TModel>
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 79700d991b..91a90f3820 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -4,6 +4,7 @@
//
#include "InferenceTest.hpp"
+#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include "CxxoptsUtils.hpp"
@@ -26,9 +27,6 @@ namespace armnn
namespace test
{
-using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
-
template <typename TTestCaseDatabase, typename TModel>
ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
int& numInferencesRef,
@@ -40,7 +38,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
unsigned int label,
std::vector<typename TModel::DataType> modelInput)
: InferenceModelTestCase<TModel>(
- model, testCaseId, std::vector<TContainer>{ modelInput }, { model.GetOutputSize() })
+ model, testCaseId, std::vector<armnn::TContainer>{ modelInput }, { model.GetOutputSize() })
, m_Label(label)
, m_QuantizationParams(model.GetQuantizationParams())
, m_NumInferencesRef(numInferencesRef)
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index 10d6e6a0f8..d1d31f48a1 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -7,7 +7,9 @@
#include "../InferenceTest.hpp"
#include "ModelAccuracyChecker.hpp"
#include "armnnDeserializer/IDeserializer.hpp"
+
#include <armnnUtils/Filesystem.hpp>
+#include <armnn/Utils.hpp>
#include <cxxopts/cxxopts.hpp>
#include <map>
@@ -253,7 +255,6 @@ int main(int argc, char* argv[])
const map<std::string, std::string> imageNameToLabel = LoadValidationImageFilenamesAndLabels(
validationLabelPath, pathToDataDir.string(), imageBegIndex, imageEndIndex, blacklistPath);
armnnUtils::ModelAccuracyChecker checker(imageNameToLabel, modelOutputLabels);
- using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
if (ValidateDirectory(dataDir))
{
@@ -324,8 +325,8 @@ int main(int argc, char* argv[])
const std::string imageName = imageEntry.first;
std::cout << "Processing image: " << imageName << "\n";
- vector<TContainer> inputDataContainers;
- vector<TContainer> outputDataContainers;
+ vector<armnn::TContainer> inputDataContainers;
+ vector<armnn::TContainer> outputDataContainers;
auto imagePath = pathToDataDir / fs::path(imageName);
switch (inputTensorDataType)
@@ -369,7 +370,7 @@ int main(int argc, char* argv[])
ARMNN_LOG(fatal) << "armnn::IRuntime: Failed to enqueue workload for image: " << imageName;
}
- checker.AddImageResult<TContainer>(imageName, outputDataContainers);
+ checker.AddImageResult<armnn::TContainer>(imageName, outputDataContainers);
}
}
else
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 95f23c991d..323e9fb812 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -228,11 +228,7 @@ void TensorPrinter::WriteToFile(const std::vector<T>& values)
}
}
-using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
-using QuantizationParams = std::pair<float, int32_t>;
-
-void PopulateTensorWithData(TContainer& tensorData,
+void PopulateTensorWithData(armnn::TContainer& tensorData,
unsigned int numElements,
const std::string& dataTypeStr,
const armnn::Optional<QuantizationParams>& qParams,
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 0abda4f8ee..d9e2459703 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -9,8 +9,7 @@
#include <armnn/Types.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/StringUtils.hpp>
-
-#include <mapbox/variant.hpp>
+#include <armnn/Utils.hpp>
#include <iostream>
#include <fstream>
@@ -52,11 +51,9 @@ private:
bool m_PrintToConsole;
};
-using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
using QuantizationParams = std::pair<float, int32_t>;
-void PopulateTensorWithData(TContainer& tensorData,
+void PopulateTensorWithData(armnn::TContainer& tensorData,
unsigned int numElements,
const std::string& dataTypeStr,
const armnn::Optional<QuantizationParams>& qParams,