aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorJames Ward <james.ward@arm.com>2020-09-28 11:56:35 +0100
committerJames Ward <james.ward@arm.com>2020-09-30 17:13:58 +0000
commit6d9f5c57fe80b3b3c08294ddd52062e107151a15 (patch)
treea5aeca515a6f81545d82939ca4031df8677aed12 /tests
parent156113cee2b869810092b42579c31c3b1fd910c5 (diff)
downloadarmnn-6d9f5c57fe80b3b3c08294ddd52062e107151a15.tar.gz
IVGCVSW-4519 Remove Boost Variant and apply_visitor variant

* Replace boost::variant with mapbox::util::variant
* Replace boost::apply_visitor with mapbox::util::apply_visitor
* Replace boost::get with mapbox::util::get

Signed-off-by: James Ward <james.ward@arm.com>
Change-Id: I38460cabbcd5e56d4d61151bfe3dcb5681ce696e
Diffstat (limited to 'tests')
-rw-r--r--tests/DeepSpeechV1InferenceTest.hpp6
-rw-r--r--tests/ImageTensorGenerator/ImageTensorGenerator.cpp11
-rw-r--r--tests/InferenceModel.hpp6
-rw-r--r--tests/InferenceTest.hpp2
-rw-r--r--tests/InferenceTest.inl8
-rw-r--r--tests/MobileNetSsdInferenceTest.hpp12
-rw-r--r--tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp2
-rw-r--r--tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp4
-rw-r--r--tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp10
-rw-r--r--tests/YoloInferenceTest.hpp2
10 files changed, 35 insertions, 28 deletions
diff --git a/tests/DeepSpeechV1InferenceTest.hpp b/tests/DeepSpeechV1InferenceTest.hpp
index 28d82bfc21..d859ba7505 100644
--- a/tests/DeepSpeechV1InferenceTest.hpp
+++ b/tests/DeepSpeechV1InferenceTest.hpp
@@ -38,13 +38,13 @@ public:
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
armnn::IgnoreUnused(options);
- const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
+ const std::vector<float>& output1 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[0]); // logits
ARMNN_ASSERT(output1.size() == k_OutputSize1);
- const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c
+ const std::vector<float>& output2 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c
ARMNN_ASSERT(output2.size() == k_OutputSize2);
- const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h
+ const std::vector<float>& output3 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h
ARMNN_ASSERT(output3.size() == k_OutputSize3);
// Check each output to see whether it is the expected value
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
index 568ba1ee95..04419d5770 100644
--- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
+++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp
@@ -10,7 +10,7 @@
#include <Filesystem.hpp>
#include <boost/program_options.hpp>
-#include <boost/variant.hpp>
+#include <mapbox/variant.hpp>
#include <algorithm>
#include <fstream>
@@ -259,7 +259,7 @@ int main(int argc, char* argv[])
const unsigned int batchSize = 1;
const armnn::DataLayout outputLayout(cmdline.GetLayout());
- using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
std::vector<TContainer> imageDataContainers;
const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType);
try
@@ -291,8 +291,11 @@ int main(int argc, char* argv[])
imageTensorFile.open(outputPath, std::ofstream::out);
if (imageTensorFile.is_open())
{
- boost::apply_visitor([&imageTensorFile](auto&& imageData) { WriteImageTensorImpl(imageData, imageTensorFile); },
- imageDataContainers[0]);
+ mapbox::util::apply_visitor(
+ [&imageTensorFile](auto&& imageData){ WriteImageTensorImpl(imageData,imageTensorFile); },
+ imageDataContainers[0]
+ );
+
if (!imageTensorFile)
{
ARMNN_LOG(fatal) << "Failed to write to output file" << outputPath;
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 0a458c8b64..fd8dedeb29 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -30,8 +30,8 @@
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/program_options.hpp>
-#include <boost/variant.hpp>
#include <fmt/format.h>
+#include <mapbox/variant.hpp>
#include <algorithm>
#include <iterator>
@@ -327,7 +327,7 @@ public:
using DataType = TDataType;
using Params = InferenceModelInternal::Params;
using QuantizationParams = InferenceModelInternal::QuantizationParams;
- using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
struct CommandLineOptions
{
@@ -499,7 +499,7 @@ public:
{
const unsigned int expectedOutputDataSize = GetOutputSize(i);
- boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
+ mapbox::util::apply_visitor([expectedOutputDataSize, i](auto&& value)
{
const unsigned int actualOutputDataSize = armnn::numeric_cast<unsigned int>(value.size());
if (actualOutputDataSize < expectedOutputDataSize)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index e80211ea71..1dafd01c01 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -109,7 +109,7 @@ template <typename TModel>
class InferenceModelTestCase : public IInferenceTestCase
{
public:
- using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
InferenceModelTestCase(TModel& model,
unsigned int testCaseId,
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 88ce3526e2..e10bb38cb0 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -26,7 +26,7 @@ namespace armnn
namespace test
{
-using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
template <typename TTestCaseDatabase, typename TModel>
ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
@@ -49,7 +49,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
{
}
-struct ClassifierResultProcessor : public boost::static_visitor<>
+struct ClassifierResultProcessor
{
using ResultMap = std::map<float,int>;
@@ -118,7 +118,7 @@ TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(cons
const auto testCaseId = this->GetTestCaseId();
ClassifierResultProcessor resultProcessor(m_QuantizationParams.first, m_QuantizationParams.second);
- boost::apply_visitor(resultProcessor, output);
+ mapbox::util::apply_visitor(resultProcessor, output);
ARMNN_LOG(info) << "= Prediction values for test #" << testCaseId;
auto it = resultProcessor.GetResultMap().rbegin();
@@ -130,7 +130,7 @@ TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(cons
}
unsigned int prediction = 0;
- boost::apply_visitor([&](auto&& value)
+ mapbox::util::apply_visitor([&](auto&& value)
{
prediction = armnn::numeric_cast<unsigned int>(
std::distance(value.begin(), std::max_element(value.begin(), value.end())));
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index e02a4acedd..f426ed2ae4 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -37,16 +37,20 @@ public:
{
armnn::IgnoreUnused(options);
- const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
+ // bounding boxes
+ const std::vector<float>& output1 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[0]);
ARMNN_ASSERT(output1.size() == k_OutputSize1);
- const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // classes
+ // classes
+ const std::vector<float>& output2 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[1]);
ARMNN_ASSERT(output2.size() == k_OutputSize2);
- const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // scores
+ // scores
+ const std::vector<float>& output3 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[2]);
ARMNN_ASSERT(output3.size() == k_OutputSize3);
- const std::vector<float>& output4 = boost::get<std::vector<float>>(this->GetOutputs()[3]); // valid detections
+ // valid detections
+ const std::vector<float>& output4 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[3]);
ARMNN_ASSERT(output4.size() == k_OutputSize4);
const size_t numDetections = boost::numeric_cast<size_t>(output4[0]);
diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
index be45d9af35..edc7e1cc33 100644
--- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
+++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
@@ -234,7 +234,7 @@ int main(int argc, char* argv[])
const map<std::string, std::string> imageNameToLabel = LoadValidationImageFilenamesAndLabels(
validationLabelPath, pathToDataDir.string(), imageBegIndex, imageEndIndex, blacklistPath);
armnnUtils::ModelAccuracyChecker checker(imageNameToLabel, modelOutputLabels);
- using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;
if (ValidateDirectory(dataDir))
{
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index 144a7f291b..6f9a9c8c54 100644
--- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
+++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
@@ -199,11 +199,11 @@ int main(int argc, char* argv[])
}
// Compares outputs.
- std::vector<float> output0 = boost::get<std::vector<float>>(outputs[0]);
+ std::vector<float> output0 = mapbox::util::get<std::vector<float>>(outputs[0]);
for (unsigned int k = 1; k < networksCount; ++k)
{
- std::vector<float> outputK = boost::get<std::vector<float>>(outputs[k]);
+ std::vector<float> outputK = mapbox::util::get<std::vector<float>>(outputs[k]);
if (!std::equal(output0.begin(), output0.end(), outputK.begin(), outputK.end()))
{
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index b5652df37f..f79d630291 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -29,7 +29,7 @@
#include <ResolveType.hpp>
#include <boost/program_options.hpp>
-#include <boost/variant.hpp>
+#include <mapbox/variant.hpp>
#include <iostream>
#include <fstream>
@@ -186,7 +186,7 @@ void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
computeDevices.end());
}
-struct TensorPrinter : public boost::static_visitor<>
+struct TensorPrinter
{
TensorPrinter(const std::string& binding,
const armnn::TensorInfo& info,
@@ -286,7 +286,7 @@ std::vector<T> GenerateDummyTensorData(unsigned int numElements)
return std::vector<T>(numElements, static_cast<T>(0));
}
-using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
using QuantizationParams = std::pair<float, int32_t>;
void PopulateTensorWithData(TContainer& tensorData,
@@ -385,7 +385,7 @@ int MainImpl(const ExecuteNetworkParams& params,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr,
size_t iterations = 1)
{
- using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+ using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
std::vector<TContainer> inputDataContainers;
@@ -501,7 +501,7 @@ int MainImpl(const ExecuteNetworkParams& params,
infoOut,
outputTensorFile,
params.m_DequantizeOutput);
- boost::apply_visitor(printer, outputDataContainers[i]);
+ mapbox::util::apply_visitor(printer, outputDataContainers[i]);
}
ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 6c783d3c48..81ba0f5127 100644
--- a/tests/YoloInferenceTest.hpp
+++ b/tests/YoloInferenceTest.hpp
@@ -38,7 +38,7 @@ public:
using Boost3dArray = boost::multi_array<float, 3>;
- const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);
+ const std::vector<float>& output = mapbox::util::get<std::vector<float>>(this->GetOutputs()[0]);
ARMNN_ASSERT(output.size() == YoloOutputSize);
constexpr Boost3dArray::index gridSize = 7;