about summary refs log tree commit diff
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2021-10-28 11:11:35 +0100
committerFrancis Murtagh <francis.murtagh@arm.com>2021-10-28 10:36:25 +0000
commit40d2741b459fb7d55ff034d615678848a8e0af7c (patch)
treee0d89fcf02a2a6c2a0a6b8121105c1371cf57193
parent040719d09b696dcb979b4dcadfc32181cb84301a (diff)
downloadarmnn-40d2741b459fb7d55ff034d615678848a8e0af7c.tar.gz
IVGCVSW-6513: Compilation failure in armnn-mobilenet-quant in ML-Examples
* Move TContainer to armnnUtils library Signed-off-by: Francis Murtagh <francis.murtagh@arm.com> Change-Id: I3c0f895d11b66f6ee224ac689a19d0477f990b98
-rw-r--r--CMakeLists.txt2
-rw-r--r--include/armnn/Utils.hpp8
-rw-r--r--include/armnnUtils/TContainer.hpp20
-rw-r--r--src/armnn/test/ModelAccuracyCheckerTest.cpp20
-rw-r--r--tests/ExecuteNetwork/ExecuteNetwork.cpp14
-rw-r--r--tests/ImageTensorGenerator/ImageTensorGenerator.cpp2
-rw-r--r--tests/ImageTensorGenerator/ImageTensorGenerator.hpp2
-rw-r--r--tests/InferenceModel.hpp16
-rw-r--r--tests/InferenceTest.hpp11
-rw-r--r--tests/InferenceTest.inl4
-rw-r--r--tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp8
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp2
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp4
13 files changed, 66 insertions(+), 47 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2def5fe8e0..3d6f663b42 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -76,6 +76,7 @@ list(APPEND armnnUtils_sources
include/armnnUtils/Filesystem.hpp
include/armnnUtils/FloatingPointComparison.hpp
include/armnnUtils/FloatingPointConverter.hpp
+ include/armnnUtils/TContainer.hpp
include/armnnUtils/TensorUtils.hpp
include/armnnUtils/Threads.hpp
include/armnnUtils/Transpose.hpp
@@ -108,7 +109,6 @@ list(APPEND armnnUtils_sources
src/armnnUtils/TensorUtils.cpp
src/armnnUtils/Threads.cpp
src/armnnUtils/Transpose.cpp
- third-party/mapbox/variant.hpp
)
add_library_ex(armnnUtils STATIC ${armnnUtils_sources})
diff --git a/include/armnn/Utils.hpp b/include/armnn/Utils.hpp
index 533117c5d9..7d442ba4b5 100644
--- a/include/armnn/Utils.hpp
+++ b/include/armnn/Utils.hpp
@@ -4,9 +4,8 @@
//
#pragma once
-#include "armnn/TypesUtils.hpp"
+#include <armnn/TypesUtils.hpp>
-#include <mapbox/variant.hpp>
#include <iostream>
namespace armnn
@@ -42,9 +41,4 @@ bool NeonDetected();
const std::string GetVersion();
-// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
-// defining your own.
-using TContainer =
- mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
-
} // namespace armnn
diff --git a/include/armnnUtils/TContainer.hpp b/include/armnnUtils/TContainer.hpp
new file mode 100644
index 0000000000..a55f9df488
--- /dev/null
+++ b/include/armnnUtils/TContainer.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/TypesUtils.hpp>
+
+#include <mapbox/variant.hpp>
+
+namespace armnnUtils
+{
+
+// Standard definition of TContainer used by ArmNN, use this definition or add alternative definitions here instead of
+// defining your own.
+ using TContainer =
+ mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
+
+} // namespace armnnUtils
diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp
index 47ae3f4b0a..59711a5397 100644
--- a/src/armnn/test/ModelAccuracyCheckerTest.cpp
+++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
#include "ModelAccuracyChecker.hpp"
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <doctest/doctest.h>
@@ -60,12 +60,12 @@ TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
// Add image 1 and check accuracy
std::vector<float> inferenceOutputVector1 = {0.05f, 0.10f, 0.70f, 0.15f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
- armnn::TContainer inference1Container(inferenceOutputVector1);
- std::vector<armnn::TContainer> outputTensor1;
+ armnnUtils::TContainer inference1Container(inferenceOutputVector1);
+ std::vector<armnnUtils::TContainer> outputTensor1;
outputTensor1.push_back(inference1Container);
std::string imageName = "val_01.JPEG";
- checker.AddImageResult<armnn::TContainer>(imageName, outputTensor1);
+ checker.AddImageResult<armnnUtils::TContainer>(imageName, outputTensor1);
// Top 1 Accuracy
float totalAccuracy = checker.GetAccuracy(1);
@@ -73,12 +73,12 @@ TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
// Add image 2 and check accuracy
std::vector<float> inferenceOutputVector2 = {0.10f, 0.0f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
- armnn::TContainer inference2Container(inferenceOutputVector2);
- std::vector<armnn::TContainer> outputTensor2;
+ armnnUtils::TContainer inference2Container(inferenceOutputVector2);
+ std::vector<armnnUtils::TContainer> outputTensor2;
outputTensor2.push_back(inference2Container);
imageName = "val_02.JPEG";
- checker.AddImageResult<armnn::TContainer>(imageName, outputTensor2);
+ checker.AddImageResult<armnnUtils::TContainer>(imageName, outputTensor2);
// Top 1 Accuracy
totalAccuracy = checker.GetAccuracy(1);
@@ -90,12 +90,12 @@ TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
// Add image 3 and check accuracy
std::vector<float> inferenceOutputVector3 = {0.0f, 0.10f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
- armnn::TContainer inference3Container(inferenceOutputVector3);
- std::vector<armnn::TContainer> outputTensor3;
+ armnnUtils::TContainer inference3Container(inferenceOutputVector3);
+ std::vector<armnnUtils::TContainer> outputTensor3;
outputTensor3.push_back(inference3Container);
imageName = "val_03.JPEG";
- checker.AddImageResult<armnn::TContainer>(imageName, outputTensor3);
+ checker.AddImageResult<armnnUtils::TContainer>(imageName, outputTensor3);
// Top 1 Accuracy
totalAccuracy = checker.GetAccuracy(1);
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index db15872ad6..dd3c0a32a1 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -9,8 +9,8 @@
#include <AsyncExecutionCallback.hpp>
#include <armnn/Logging.hpp>
-#include <armnn/Utils.hpp>
#include <armnnUtils/Filesystem.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <InferenceTest.hpp>
#if defined(ARMNN_SERIALIZER)
@@ -370,8 +370,8 @@ int MainImpl(const ExecuteNetworkParams& params,
{
using namespace std::chrono;
- std::vector<std::vector<armnn::TContainer>> inputs;
- std::vector<std::vector<armnn::TContainer>> outputs;
+ std::vector<std::vector<armnnUtils::TContainer>> inputs;
+ std::vector<std::vector<armnnUtils::TContainer>> outputs;
try
{
@@ -436,7 +436,7 @@ int MainImpl(const ExecuteNetworkParams& params,
for(unsigned int j = 0; j < params.m_Iterations ; ++j)
{
- std::vector<armnn::TContainer> inputDataContainers;
+ std::vector<armnnUtils::TContainer> inputDataContainers;
for(unsigned int i = 0; i < numInputs; ++i)
{
// If there are less input files given than required for the execution of
@@ -460,7 +460,7 @@ int MainImpl(const ExecuteNetworkParams& params,
numElements = params.m_InputTensorShapes[i]->GetNumElements();
}
- armnn::TContainer tensorData;
+ armnnUtils::TContainer tensorData;
PopulateTensorWithData(tensorData,
numElements,
params.m_InputTypes[i],
@@ -476,7 +476,7 @@ int MainImpl(const ExecuteNetworkParams& params,
for (unsigned int j = 0; j < params.m_Iterations; ++j)
{
- std::vector <armnn::TContainer> outputDataContainers;
+ std::vector <armnnUtils::TContainer> outputDataContainers;
for (unsigned int i = 0; i < numOutputs; ++i)
{
if (params.m_OutputTypes[i].compare("float") == 0)
@@ -596,7 +596,7 @@ int MainImpl(const ExecuteNetworkParams& params,
{
ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool... \n";
armnn::AsyncCallbackManager callbackManager;
- std::unordered_map<armnn::InferenceId, std::vector<armnn::TContainer>&> inferenceOutputMap;
+ std::unordered_map<armnn::InferenceId, std::vector<armnnUtils::TContainer>&> inferenceOutputMap;
// Declare the latest and earliest inference times here to be used when calculating overall time
std::chrono::high_resolution_clock::time_point earliestStartTime;
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index a69a098eb4..0f1cf6d890 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -296,7 +296,7 @@ int main(int argc, char* argv[])
const unsigned int batchSize = 1;
const armnn::DataLayout outputLayout(cmdline.GetLayout());
- std::vector<armnn::TContainer> imageDataContainers;
+ std::vector<armnnUtils::TContainer> imageDataContainers;
const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
try
{
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
index c668608f1d..b9579e7636 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp
@@ -5,9 +5,9 @@
#include "../InferenceTestImage.hpp"
-#include <armnn/Utils.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <armnnUtils/Permute.hpp>
#include <algorithm>
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index e2cd5d9868..13f7d74e8d 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -7,13 +7,15 @@
#include <armnn/ArmNN.hpp>
-#include <armnn/Utils.hpp>
#include <armnn/Threadpool.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/TContainer.hpp>
+
#include <common/include/ProfilingGuid.hpp>
#if defined(ARMNN_SERIALIZER)
@@ -584,8 +586,8 @@ public:
}
std::chrono::duration<double, std::milli> Run(
- const std::vector<armnn::TContainer>& inputContainers,
- std::vector<armnn::TContainer>& outputContainers)
+ const std::vector<armnnUtils::TContainer>& inputContainers,
+ std::vector<armnnUtils::TContainer>& outputContainers)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
{
@@ -633,8 +635,8 @@ public:
std::tuple<unsigned int, std::chrono::duration<double, std::milli>> RunAsync(
armnn::experimental::IWorkingMemHandle& workingMemHandleRef,
- const std::vector<armnn::TContainer>& inputContainers,
- std::vector<armnn::TContainer>& outputContainers,
+ const std::vector<armnnUtils::TContainer>& inputContainers,
+ std::vector<armnnUtils::TContainer>& outputContainers,
unsigned int inferenceID)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
@@ -684,8 +686,8 @@ public:
}
}
- void RunAsync(const std::vector<armnn::TContainer>& inputContainers,
- std::vector<armnn::TContainer>& outputContainers,
+ void RunAsync(const std::vector<armnnUtils::TContainer>& inputContainers,
+ std::vector<armnnUtils::TContainer>& outputContainers,
std::shared_ptr<armnn::IAsyncExecutionCallback> cb)
{
for (unsigned int i = 0; i < outputContainers.size(); ++i)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index d0bb0c00f3..fb9c048488 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -7,11 +7,12 @@
#include "InferenceModel.hpp"
#include <armnn/ArmNN.hpp>
-#include <armnn/Utils.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnUtils/TContainer.hpp>
+
#include <cxxopts/cxxopts.hpp>
#include <fmt/format.h>
@@ -114,7 +115,7 @@ public:
InferenceModelTestCase(TModel& model,
unsigned int testCaseId,
- const std::vector<armnn::TContainer>& inputs,
+ const std::vector<armnnUtils::TContainer>& inputs,
const std::vector<unsigned int>& outputSizes)
: m_Model(model)
, m_TestCaseId(testCaseId)
@@ -137,13 +138,13 @@ public:
protected:
unsigned int GetTestCaseId() const { return m_TestCaseId; }
- const std::vector<armnn::TContainer>& GetOutputs() const { return m_Outputs; }
+ const std::vector<armnnUtils::TContainer>& GetOutputs() const { return m_Outputs; }
private:
TModel& m_Model;
unsigned int m_TestCaseId;
- std::vector<armnn::TContainer> m_Inputs;
- std::vector<armnn::TContainer> m_Outputs;
+ std::vector<armnnUtils::TContainer> m_Inputs;
+ std::vector<armnnUtils::TContainer> m_Outputs;
};
template <typename TTestCaseDatabase, typename TModel>
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 91a90f3820..b6087c5e5a 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -7,6 +7,8 @@
#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
+#include <armnnUtils/TContainer.hpp>
+
#include "CxxoptsUtils.hpp"
#include <cxxopts/cxxopts.hpp>
@@ -38,7 +40,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
unsigned int label,
std::vector<typename TModel::DataType> modelInput)
: InferenceModelTestCase<TModel>(
- model, testCaseId, std::vector<armnn::TContainer>{ modelInput }, { model.GetOutputSize() })
+ model, testCaseId, std::vector<armnnUtils::TContainer>{ modelInput }, { model.GetOutputSize() })
, m_Label(label)
, m_QuantizationParams(model.GetQuantizationParams())
, m_NumInferencesRef(numInferencesRef)
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index d1d31f48a1..c08d88e053 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -9,7 +9,7 @@
#include "armnnDeserializer/IDeserializer.hpp"
#include <armnnUtils/Filesystem.hpp>
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <cxxopts/cxxopts.hpp>
#include <map>
@@ -325,8 +325,8 @@ int main(int argc, char* argv[])
const std::string imageName = imageEntry.first;
std::cout << "Processing image: " << imageName << "\n";
- vector<armnn::TContainer> inputDataContainers;
- vector<armnn::TContainer> outputDataContainers;
+ vector<armnnUtils::TContainer> inputDataContainers;
+ vector<armnnUtils::TContainer> outputDataContainers;
auto imagePath = pathToDataDir / fs::path(imageName);
switch (inputTensorDataType)
@@ -370,7 +370,7 @@ int main(int argc, char* argv[])
ARMNN_LOG(fatal) << "armnn::IRuntime: Failed to enqueue workload for image: " << imageName;
}
- checker.AddImageResult<armnn::TContainer>(imageName, outputDataContainers);
+ checker.AddImageResult<armnnUtils::TContainer>(imageName, outputDataContainers);
}
}
else
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 323e9fb812..6c74aaa6ed 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -228,7 +228,7 @@ void TensorPrinter::WriteToFile(const std::vector<T>& values)
}
}
-void PopulateTensorWithData(armnn::TContainer& tensorData,
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
unsigned int numElements,
const std::string& dataTypeStr,
const armnn::Optional<QuantizationParams>& qParams,
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index d9e2459703..bc2868ab35 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -9,7 +9,7 @@
#include <armnn/Types.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/StringUtils.hpp>
-#include <armnn/Utils.hpp>
+#include <armnnUtils/TContainer.hpp>
#include <iostream>
#include <fstream>
@@ -53,7 +53,7 @@ private:
using QuantizationParams = std::pair<float, int32_t>;
-void PopulateTensorWithData(armnn::TContainer& tensorData,
+void PopulateTensorWithData(armnnUtils::TContainer& tensorData,
unsigned int numElements,
const std::string& dataTypeStr,
const armnn::Optional<QuantizationParams>& qParams,