diff options
-rw-r--r-- | src/armnn/NetworkQuantizer.cpp | 4
-rw-r--r-- | src/armnn/test/ModelAccuracyCheckerTest.cpp | 3
-rw-r--r-- | src/armnnUtils/ModelAccuracyChecker.hpp | 4
-rw-r--r-- | src/armnnUtils/TensorIOUtils.hpp | 6
-rw-r--r-- | tests/DeepSpeechV1InferenceTest.hpp | 6
-rw-r--r-- | tests/ImageTensorGenerator/ImageTensorGenerator.cpp | 11
-rw-r--r-- | tests/InferenceModel.hpp | 6
-rw-r--r-- | tests/InferenceTest.hpp | 2
-rw-r--r-- | tests/InferenceTest.inl | 8
-rw-r--r-- | tests/MobileNetSsdInferenceTest.hpp | 12
-rw-r--r-- | tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp | 2
-rw-r--r-- | tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp | 4
-rw-r--r-- | tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp | 10
-rw-r--r-- | tests/YoloInferenceTest.hpp | 2
14 files changed, 43 insertions, 37 deletions
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp index 3712c7be3a..e6becee96f 100644 --- a/src/armnn/NetworkQuantizer.cpp +++ b/src/armnn/NetworkQuantizer.cpp @@ -23,7 +23,7 @@ #include <armnnUtils/TensorUtils.hpp> #include <armnn/utility/PolymorphicDowncast.hpp> -#include <boost/variant.hpp> +#include <mapbox/variant.hpp> #include <vector> #include <cmath> @@ -31,7 +31,7 @@ namespace armnn { -using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; +using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; INetworkQuantizer* INetworkQuantizer::CreateRaw(INetwork* inputNetwork, const QuantizerOptions& options) { diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp index c6c93edc3c..55ac19a406 100644 --- a/src/armnn/test/ModelAccuracyCheckerTest.cpp +++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp @@ -6,7 +6,6 @@ #include <boost/test/unit_test.hpp> -#include <boost/variant.hpp> #include <iostream> #include <string> @@ -53,7 +52,7 @@ struct TestHelper BOOST_AUTO_TEST_SUITE(ModelAccuracyCheckerTest) -using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; +using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper) { diff --git a/src/armnnUtils/ModelAccuracyChecker.hpp b/src/armnnUtils/ModelAccuracyChecker.hpp index 6595a52a98..64409d6904 100644 --- a/src/armnnUtils/ModelAccuracyChecker.hpp +++ b/src/armnnUtils/ModelAccuracyChecker.hpp @@ -8,7 +8,7 @@ #include <algorithm> #include <armnn/Types.hpp> #include <armnn/utility/Assert.hpp> -#include <boost/variant/apply_visitor.hpp> +#include <mapbox/variant.hpp> #include <cstddef> #include <functional> #include <iostream> @@ -79,7 +79,7 @@ public: auto& output = outputTensor[0]; // Create a map 
of all predictions - boost::apply_visitor([&confidenceMap](auto && value) + mapbox::util::apply_visitor([&confidenceMap](auto && value) { int index = 0; for (const auto & o : value) diff --git a/src/armnnUtils/TensorIOUtils.hpp b/src/armnnUtils/TensorIOUtils.hpp index 1dc7f21857..098b4dadec 100644 --- a/src/armnnUtils/TensorIOUtils.hpp +++ b/src/armnnUtils/TensorIOUtils.hpp @@ -8,7 +8,7 @@ #include <armnn/Tensor.hpp> #include <boost/format.hpp> -#include <boost/variant/apply_visitor.hpp> +#include <mapbox/variant.hpp> namespace armnnUtils { @@ -33,7 +33,7 @@ inline armnn::InputTensors MakeInputTensors(const std::vector<armnn::BindingPoin const armnn::BindingPointInfo& inputBinding = inputBindings[i]; const TContainer& inputData = inputDataContainers[i]; - boost::apply_visitor([&](auto&& value) + mapbox::util::apply_visitor([&](auto&& value) { if (value.size() != inputBinding.second.GetNumElements()) { @@ -72,7 +72,7 @@ inline armnn::OutputTensors MakeOutputTensors(const std::vector<armnn::BindingPo const armnn::BindingPointInfo& outputBinding = outputBindings[i]; TContainer& outputData = outputDataContainers[i]; - boost::apply_visitor([&](auto&& value) + mapbox::util::apply_visitor([&](auto&& value) { if (value.size() != outputBinding.second.GetNumElements()) { diff --git a/tests/DeepSpeechV1InferenceTest.hpp b/tests/DeepSpeechV1InferenceTest.hpp index 28d82bfc21..d859ba7505 100644 --- a/tests/DeepSpeechV1InferenceTest.hpp +++ b/tests/DeepSpeechV1InferenceTest.hpp @@ -38,13 +38,13 @@ public: TestCaseResult ProcessResult(const InferenceTestOptions& options) override { armnn::IgnoreUnused(options); - const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits + const std::vector<float>& output1 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[0]); // logits ARMNN_ASSERT(output1.size() == k_OutputSize1); - const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c + const 
std::vector<float>& output2 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c ARMNN_ASSERT(output2.size() == k_OutputSize2); - const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h + const std::vector<float>& output3 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h ARMNN_ASSERT(output3.size() == k_OutputSize3); // Check each output to see whether it is the expected value diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp index 568ba1ee95..04419d5770 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp @@ -10,7 +10,7 @@ #include <Filesystem.hpp> #include <boost/program_options.hpp> -#include <boost/variant.hpp> +#include <mapbox/variant.hpp> #include <algorithm> #include <fstream> @@ -259,7 +259,7 @@ int main(int argc, char* argv[]) const unsigned int batchSize = 1; const armnn::DataLayout outputLayout(cmdline.GetLayout()); - using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>; + using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>; std::vector<TContainer> imageDataContainers; const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType); try @@ -291,8 +291,11 @@ int main(int argc, char* argv[]) imageTensorFile.open(outputPath, std::ofstream::out); if (imageTensorFile.is_open()) { - boost::apply_visitor([&imageTensorFile](auto&& imageData) { WriteImageTensorImpl(imageData, imageTensorFile); }, - imageDataContainers[0]); + mapbox::util::apply_visitor( + [&imageTensorFile](auto&& imageData){ WriteImageTensorImpl(imageData,imageTensorFile); }, + imageDataContainers[0] + ); + if (!imageTensorFile) { ARMNN_LOG(fatal) << "Failed to write to output file" << outputPath; diff --git a/tests/InferenceModel.hpp 
b/tests/InferenceModel.hpp index 0a458c8b64..fd8dedeb29 100644 --- a/tests/InferenceModel.hpp +++ b/tests/InferenceModel.hpp @@ -30,8 +30,8 @@ #include <boost/exception/exception.hpp> #include <boost/exception/diagnostic_information.hpp> #include <boost/program_options.hpp> -#include <boost/variant.hpp> #include <fmt/format.h> +#include <mapbox/variant.hpp> #include <algorithm> #include <iterator> @@ -327,7 +327,7 @@ public: using DataType = TDataType; using Params = InferenceModelInternal::Params; using QuantizationParams = InferenceModelInternal::QuantizationParams; - using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; + using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; struct CommandLineOptions { @@ -499,7 +499,7 @@ public: { const unsigned int expectedOutputDataSize = GetOutputSize(i); - boost::apply_visitor([expectedOutputDataSize, i](auto&& value) + mapbox::util::apply_visitor([expectedOutputDataSize, i](auto&& value) { const unsigned int actualOutputDataSize = armnn::numeric_cast<unsigned int>(value.size()); if (actualOutputDataSize < expectedOutputDataSize) diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp index e80211ea71..1dafd01c01 100644 --- a/tests/InferenceTest.hpp +++ b/tests/InferenceTest.hpp @@ -109,7 +109,7 @@ template <typename TModel> class InferenceModelTestCase : public IInferenceTestCase { public: - using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; + using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; InferenceModelTestCase(TModel& model, unsigned int testCaseId, diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl index 88ce3526e2..e10bb38cb0 100644 --- a/tests/InferenceTest.inl +++ b/tests/InferenceTest.inl @@ -26,7 +26,7 @@ namespace armnn namespace test { -using TContainer = 
boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; +using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; template <typename TTestCaseDatabase, typename TModel> ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase( @@ -49,7 +49,7 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase( { } -struct ClassifierResultProcessor : public boost::static_visitor<> +struct ClassifierResultProcessor { using ResultMap = std::map<float,int>; @@ -118,7 +118,7 @@ TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(cons const auto testCaseId = this->GetTestCaseId(); ClassifierResultProcessor resultProcessor(m_QuantizationParams.first, m_QuantizationParams.second); - boost::apply_visitor(resultProcessor, output); + mapbox::util::apply_visitor(resultProcessor, output); ARMNN_LOG(info) << "= Prediction values for test #" << testCaseId; auto it = resultProcessor.GetResultMap().rbegin(); @@ -130,7 +130,7 @@ TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(cons } unsigned int prediction = 0; - boost::apply_visitor([&](auto&& value) + mapbox::util::apply_visitor([&](auto&& value) { prediction = armnn::numeric_cast<unsigned int>( std::distance(value.begin(), std::max_element(value.begin(), value.end()))); diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp index e02a4acedd..f426ed2ae4 100644 --- a/tests/MobileNetSsdInferenceTest.hpp +++ b/tests/MobileNetSsdInferenceTest.hpp @@ -37,16 +37,20 @@ public: { armnn::IgnoreUnused(options); - const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes + // bounding boxes + const std::vector<float>& output1 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[0]); ARMNN_ASSERT(output1.size() == k_OutputSize1); - const std::vector<float>& output2 = 
boost::get<std::vector<float>>(this->GetOutputs()[1]); // classes + // classes + const std::vector<float>& output2 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[1]); ARMNN_ASSERT(output2.size() == k_OutputSize2); - const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // scores + // scores + const std::vector<float>& output3 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[2]); ARMNN_ASSERT(output3.size() == k_OutputSize3); - const std::vector<float>& output4 = boost::get<std::vector<float>>(this->GetOutputs()[3]); // valid detections + // valid detections + const std::vector<float>& output4 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[3]); ARMNN_ASSERT(output4.size() == k_OutputSize4); const size_t numDetections = boost::numeric_cast<size_t>(output4[0]); diff --git a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp index be45d9af35..edc7e1cc33 100644 --- a/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp +++ b/tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp @@ -234,7 +234,7 @@ int main(int argc, char* argv[]) const map<std::string, std::string> imageNameToLabel = LoadValidationImageFilenamesAndLabels( validationLabelPath, pathToDataDir.string(), imageBegIndex, imageEndIndex, blacklistPath); armnnUtils::ModelAccuracyChecker checker(imageNameToLabel, modelOutputLabels); - using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>; + using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>; if (ValidateDirectory(dataDir)) { diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp index 144a7f291b..6f9a9c8c54 100644 --- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp +++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp @@ -199,11 
+199,11 @@ int main(int argc, char* argv[]) } // Compares outputs. - std::vector<float> output0 = boost::get<std::vector<float>>(outputs[0]); + std::vector<float> output0 = mapbox::util::get<std::vector<float>>(outputs[0]); for (unsigned int k = 1; k < networksCount; ++k) { - std::vector<float> outputK = boost::get<std::vector<float>>(outputs[k]); + std::vector<float> outputK = mapbox::util::get<std::vector<float>>(outputs[k]); if (!std::equal(output0.begin(), output0.end(), outputK.begin(), outputK.end())) { diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp index b5652df37f..f79d630291 100644 --- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp @@ -29,7 +29,7 @@ #include <ResolveType.hpp> #include <boost/program_options.hpp> -#include <boost/variant.hpp> +#include <mapbox/variant.hpp> #include <iostream> #include <fstream> @@ -186,7 +186,7 @@ void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices) computeDevices.end()); } -struct TensorPrinter : public boost::static_visitor<> +struct TensorPrinter { TensorPrinter(const std::string& binding, const armnn::TensorInfo& info, @@ -286,7 +286,7 @@ std::vector<T> GenerateDummyTensorData(unsigned int numElements) return std::vector<T>(numElements, static_cast<T>(0)); } -using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; +using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; using QuantizationParams = std::pair<float, int32_t>; void PopulateTensorWithData(TContainer& tensorData, @@ -385,7 +385,7 @@ int MainImpl(const ExecuteNetworkParams& params, const std::shared_ptr<armnn::IRuntime>& runtime = nullptr, size_t iterations = 1) { - using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; + using TContainer = 
mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>; std::vector<TContainer> inputDataContainers; @@ -501,7 +501,7 @@ int MainImpl(const ExecuteNetworkParams& params, infoOut, outputTensorFile, params.m_DequantizeOutput); - boost::apply_visitor(printer, outputDataContainers[i]); + mapbox::util::apply_visitor(printer, outputDataContainers[i]); } ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2) diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp index 6c783d3c48..81ba0f5127 100644 --- a/tests/YoloInferenceTest.hpp +++ b/tests/YoloInferenceTest.hpp @@ -38,7 +38,7 @@ public: using Boost3dArray = boost::multi_array<float, 3>; - const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]); + const std::vector<float>& output = mapbox::util::get<std::vector<float>>(this->GetOutputs()[0]); ARMNN_ASSERT(output.size() == YoloOutputSize); constexpr Boost3dArray::index gridSize = 7; |