| author | David Beck <david.beck@arm.com> | 2018-10-19 15:20:56 +0100 |
|---|---|---|
| committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-22 16:57:54 +0100 |
| commit | f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9 (patch) | |
| tree | 5a8726ee4a397c421a6a41d6edca1a2d3183f168 /tests/InferenceModel.hpp | |
| parent | 7bc8c9fc9726d3c9ac002138c594688a006faac6 (diff) | |
| download | armnn-f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9.tar.gz | |
IVGCVSW-2019: replace Compute enum in the backend preferences list
Change-Id: Ie7549fd27378acfa97e68d098e338b8c9d4ea5d2
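The swap is source-compatible for existing callers because `armnn::BackendId` is implicitly constructible from the old enum, while also accepting arbitrary string identifiers. A minimal sketch of the distinction, assuming the `armnn/BackendId.hpp` API of this period (the `"SamplePlugin"` name is hypothetical):

```cpp
// Sketch only: BackendId accepts both the legacy armnn::Compute enum
// values and free-form string identifiers, so a preference list can
// also name backends outside the fixed enum.
#include <armnn/ArmNN.hpp>
#include <vector>

std::vector<armnn::BackendId> MakeBackendPreferences()
{
    return { armnn::Compute::CpuAcc,           // enum value, implicitly converted
             armnn::Compute::CpuRef,
             armnn::BackendId("SamplePlugin")  // hypothetical string-named backend
           };
}
```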
Diffstat (limited to 'tests/InferenceModel.hpp')
| file mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | tests/InferenceModel.hpp | 19 |

1 file changed, 10 insertions, 9 deletions
```diff
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 2e0aff981a..8645c9041a 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -3,15 +3,15 @@
 // SPDX-License-Identifier: MIT
 //
 #pragma once
-#include "armnn/ArmNN.hpp"
+#include <armnn/ArmNN.hpp>
 
 #if defined(ARMNN_TF_LITE_PARSER)
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include <armnnTfLiteParser/ITfLiteParser.hpp>
 #endif
 
 #include <HeapProfiling.hpp>
 #if defined(ARMNN_ONNX_PARSER)
-#include "armnnOnnxParser/IOnnxParser.hpp"
+#include <armnnOnnxParser/IOnnxParser.hpp>
 #endif
 
 #include <boost/exception/exception.hpp>
@@ -20,6 +20,7 @@
 #include <boost/format.hpp>
 #include <boost/program_options.hpp>
 #include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp>
 
 #include <map>
 #include <string>
@@ -40,7 +41,7 @@ struct Params
     std::string m_InputBinding;
     std::string m_OutputBinding;
     const armnn::TensorShape* m_InputTensorShape;
-    std::vector<armnn::Compute> m_ComputeDevice;
+    std::vector<armnn::BackendId> m_ComputeDevice;
     bool m_EnableProfiling;
     size_t m_SubgraphId;
     bool m_IsModelBinary;
@@ -195,8 +196,6 @@ inline armnn::OutputTensors MakeOutputTensors(const InferenceModelInternal::Bind
     return { { output.first, armnn::Tensor(output.second, outputTensorData.data()) } };
 }
 
-
-
 template <typename IParser, typename TDataType>
 class InferenceModel
 {
@@ -207,7 +206,7 @@ public:
     struct CommandLineOptions
     {
         std::string m_ModelDir;
-        std::vector<armnn::Compute> m_ComputeDevice;
+        std::vector<armnn::BackendId> m_ComputeDevice;
         bool m_VisualizePostOptimizationModel;
         bool m_EnableFp16TurboMode;
     };
@@ -216,11 +215,13 @@ public:
     {
         namespace po = boost::program_options;
 
+        std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
+
         desc.add_options()
             ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                 "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
-            ("compute,c", po::value<std::vector<armnn::Compute>>(&options.m_ComputeDevice)->default_value
-                ({armnn::Compute::CpuAcc, armnn::Compute::CpuRef}),
+            ("compute,c", po::value<std::vector<armnn::BackendId>>(&options.m_ComputeDevice)->default_value
+                (defaultBackends),
                 "Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
             ("visualize-optimized-model,v",
                 po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
```
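For context, here is a self-contained sketch of the option-parsing pattern the new code relies on. It assumes `armnn::BackendId` provides the stream operators that `boost::program_options` (via `boost::lexical_cast`) needs to parse tokens and print the default value, which the added `<boost/lexical_cast.hpp>` include hints at; it is illustrative only, not the project's code:

```cpp
// Illustrative sketch: collect repeated "-c"/"--compute" flags into a
// std::vector<armnn::BackendId>, falling back to {CpuAcc, CpuRef} when
// none are given. Example invocation: ./sketch -c GpuAcc -c CpuRef
#include <armnn/ArmNN.hpp>
#include <boost/program_options.hpp>
#include <iostream>
#include <vector>

int main(int argc, char* argv[])
{
    namespace po = boost::program_options;

    std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc,
                                                     armnn::Compute::CpuRef};
    std::vector<armnn::BackendId> computeDevices;

    po::options_description desc("Options");
    desc.add_options()
        ("compute,c",
         po::value<std::vector<armnn::BackendId>>(&computeDevices)->default_value(defaultBackends),
         "Backend(s) to run layers on, in order of preference");

    po::variables_map vm;
    po::store(po::parse_command_line(argc, argv, desc), vm);
    po::notify(vm);

    for (const auto& backend : computeDevices)
    {
        std::cout << backend << std::endl;   // BackendId is output-streamable
    }
    return 0;
}
```

Repeating the flag appends to the vector; `program_options` substitutes the default only when the flag is absent, which is why the diff hoists the default list into a named `defaultBackends` variable rather than an inline initializer list.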