author     David Beck <david.beck@arm.com>            2018-10-19 15:20:56 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>  2018-10-22 16:57:54 +0100
commit     f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9 (patch)
tree       5a8726ee4a397c421a6a41d6edca1a2d3183f168 /tests
parent     7bc8c9fc9726d3c9ac002138c594688a006faac6 (diff)
IVGCVSW-2019 : replace Compute enum in the backend preferences list
Change-Id: Ie7549fd27378acfa97e68d098e338b8c9d4ea5d2
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                    18
-rw-r--r--  tests/InferenceModel.hpp                                   19
-rw-r--r--  tests/InferenceTest.hpp                                    18
-rw-r--r--  tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp   8
4 files changed, 39 insertions, 24 deletions
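
For context, the patch swaps the fixed armnn::Compute enum for armnn::BackendId wherever a backend preference list is built, so a preference can be expressed either as an enum value or, via the string-based BackendId, by name. A minimal sketch of the intent, assuming BackendId is also constructible from a string (only the Compute-to-BackendId conversion is confirmed by this patch):

#include <armnn/ArmNN.hpp>
#include <vector>

int main()
{
    // Before this change the preference list was restricted to the enum:
    //     std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };

    // After the change the list holds BackendIds; Compute values still convert
    // (as in the defaultBackends list below), and string names are assumed to work too.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::BackendId("CpuRef") };

    return backends.empty() ? 1 : 0;
}
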
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index ee207472d0..7f1bcd38dc 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -147,7 +147,7 @@ void PrintArray(const std::vector<float>& v)
printf("\n");
}
-void RemoveDuplicateDevices(std::vector<armnn::Compute>& computeDevices)
+void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
{
// Mark the duplicate devices as 'Undefined'.
for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
@@ -166,11 +166,11 @@ void RemoveDuplicateDevices(std::vector<armnn::Compute>& computeDevices)
computeDevices.end());
}
-bool CheckDevicesAreValid(const std::vector<armnn::Compute>& computeDevices)
+bool CheckDevicesAreValid(const std::vector<armnn::BackendId>& computeDevices)
{
return (!computeDevices.empty()
&& std::none_of(computeDevices.begin(), computeDevices.end(),
- [](armnn::Compute c){ return c == armnn::Compute::Undefined; }));
+ [](armnn::BackendId c){ return c == armnn::Compute::Undefined; }));
}
} // namespace
@@ -178,7 +178,7 @@ bool CheckDevicesAreValid(const std::vector<armnn::Compute>& computeDevices)
template<typename TParser, typename TDataType>
int MainImpl(const char* modelPath,
bool isModelBinary,
- const std::vector<armnn::Compute>& computeDevice,
+ const std::vector<armnn::BackendId>& computeDevice,
const char* inputName,
const armnn::TensorShape* inputTensorShape,
const char* inputTensorDataFilePath,
@@ -232,7 +232,7 @@ int MainImpl(const char* modelPath,
// This will run a test
int RunTest(const std::string& modelFormat,
const std::string& inputTensorShapeStr,
- const vector<armnn::Compute>& computeDevice,
+ const vector<armnn::BackendId>& computeDevice,
const std::string& modelPath,
const std::string& inputName,
const std::string& inputTensorDataFilePath,
@@ -360,7 +360,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
"caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or tensorflow-text.")
("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt, .tflite,"
" .onnx")
- ("compute,c", po::value<std::vector<armnn::Compute>>()->multitoken(),
+ ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
"The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
@@ -414,7 +414,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow,
boost::trim(outputName);
// Get the preferred order of compute devices.
- std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
+ std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
// Remove duplicates from the list of compute devices.
RemoveDuplicateDevices(computeDevices);
@@ -466,7 +466,7 @@ int main(int argc, const char* argv[])
"caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
" .tflite, .onnx")
- ("compute,c", po::value<std::vector<armnn::Compute>>()->multitoken(),
+ ("compute,c", po::value<std::vector<std::string>>()->multitoken(),
"The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
@@ -588,7 +588,7 @@ int main(int argc, const char* argv[])
else // Run single test
{
// Get the preferred order of compute devices.
- std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
+ std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
// Remove duplicates from the list of compute devices.
RemoveDuplicateDevices(computeDevices);
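
Note that in main() the "compute" option is now read as plain strings, while RunCsvTest reads armnn::BackendId values directly through the stream operator added in InferenceTest.hpp. A hedged sketch of how a raw string list could be turned into a backend preference list; the ToBackendIds helper is hypothetical and not part of this patch:

#include <armnn/ArmNN.hpp>
#include <string>
#include <vector>

// Hypothetical helper: convert raw --compute tokens into BackendIds.
std::vector<armnn::BackendId> ToBackendIds(const std::vector<std::string>& names)
{
    std::vector<armnn::BackendId> backends;
    backends.reserve(names.size());
    for (const std::string& name : names)
    {
        // Assumes armnn::BackendId is constructible from std::string.
        backends.emplace_back(name);
    }
    return backends;
}
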
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 2e0aff981a..8645c9041a 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -3,15 +3,15 @@
// SPDX-License-Identifier: MIT
//
#pragma once
-#include "armnn/ArmNN.hpp"
+#include <armnn/ArmNN.hpp>
#if defined(ARMNN_TF_LITE_PARSER)
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#include <HeapProfiling.hpp>
#if defined(ARMNN_ONNX_PARSER)
-#include "armnnOnnxParser/IOnnxParser.hpp"
+#include <armnnOnnxParser/IOnnxParser.hpp>
#endif
#include <boost/exception/exception.hpp>
@@ -20,6 +20,7 @@
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp>
#include <map>
#include <string>
@@ -40,7 +41,7 @@ struct Params
std::string m_InputBinding;
std::string m_OutputBinding;
const armnn::TensorShape* m_InputTensorShape;
- std::vector<armnn::Compute> m_ComputeDevice;
+ std::vector<armnn::BackendId> m_ComputeDevice;
bool m_EnableProfiling;
size_t m_SubgraphId;
bool m_IsModelBinary;
@@ -195,8 +196,6 @@ inline armnn::OutputTensors MakeOutputTensors(const InferenceModelInternal::Bind
return { { output.first, armnn::Tensor(output.second, outputTensorData.data()) } };
}
-
-
template <typename IParser, typename TDataType>
class InferenceModel
{
@@ -207,7 +206,7 @@ public:
struct CommandLineOptions
{
std::string m_ModelDir;
- std::vector<armnn::Compute> m_ComputeDevice;
+ std::vector<armnn::BackendId> m_ComputeDevice;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
};
@@ -216,11 +215,13 @@ public:
{
namespace po = boost::program_options;
+ std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
+
desc.add_options()
("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
"Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
- ("compute,c", po::value<std::vector<armnn::Compute>>(&options.m_ComputeDevice)->default_value
- ({armnn::Compute::CpuAcc, armnn::Compute::CpuRef}),
+ ("compute,c", po::value<std::vector<armnn::BackendId>>(&options.m_ComputeDevice)->default_value
+ (defaultBackends),
"Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
("visualize-optimized-model,v",
po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 32d828ddbc..3ea70962d2 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -4,8 +4,8 @@
//
#pragma once
-#include "armnn/ArmNN.hpp"
-#include "armnn/TypesUtils.hpp"
+#include <armnn/ArmNN.hpp>
+#include <armnn/TypesUtils.hpp>
#include "InferenceModel.hpp"
#include <Logging.hpp>
@@ -30,6 +30,20 @@ inline std::istream& operator>>(std::istream& in, armnn::Compute& compute)
return in;
}
+inline std::istream& operator>>(std::istream& in, armnn::BackendId& backend)
+{
+ std::string token;
+ in >> token;
+ armnn::Compute compute = armnn::ParseComputeDevice(token.c_str());
+ if (compute == armnn::Compute::Undefined)
+ {
+ in.setstate(std::ios_base::failbit);
+ throw boost::program_options::validation_error(boost::program_options::validation_error::invalid_option_value);
+ }
+ backend = compute;
+ return in;
+}
+
namespace test
{
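
The operator>> added above is what lets boost::program_options fill a std::vector<armnn::BackendId> from the --compute tokens: each token is parsed with armnn::ParseComputeDevice, and an unknown name sets failbit and throws validation_error. A minimal sketch exercising it directly through a stream (test scaffolding only, not part of the patch):

#include <cassert>
#include <sstream>

#include "InferenceTest.hpp" // brings in the operator>> shown above

int main()
{
    std::istringstream stream("CpuAcc");
    armnn::BackendId backend;
    stream >> backend;                          // parsed via armnn::ParseComputeDevice
    assert(backend == armnn::Compute::CpuAcc);  // BackendId compares against Compute

    // A token such as "NotABackend" would set failbit and throw
    // boost::program_options::validation_error instead.
    return 0;
}
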
diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
index 34fdbf0867..f9fdf8b3ea 100644
--- a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
+++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
@@ -36,7 +36,8 @@ int main(int argc, char* argv[])
namespace po = boost::program_options;
- std::vector<armnn::Compute> computeDevice;
+ std::vector<armnn::BackendId> computeDevice;
+ std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
std::string modelDir;
std::string dataDir;
@@ -48,8 +49,7 @@ int main(int argc, char* argv[])
("help", "Display help messages")
("model-dir,m", po::value<std::string>(&modelDir)->required(),
"Path to directory containing the Cifar10 model file")
- ("compute,c", po::value<std::vector<armnn::Compute>>(&computeDevice)->default_value
- ({armnn::Compute::CpuAcc, armnn::Compute::CpuRef}),
+ ("compute,c", po::value<std::vector<armnn::BackendId>>(&computeDevice)->default_value(defaultBackends),
"Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
("data-dir,d", po::value<std::string>(&dataDir)->required(),
"Path to directory containing the Cifar10 test data");
@@ -200,7 +200,7 @@ int main(int argc, char* argv[])
}
catch (const std::exception& e)
{
- // Coverity fix: various boost exceptions can be thrown by methods called by this test.
+ // Coverity fix: various boost exceptions can be thrown by methods called by this test.
std::cerr << "WARNING: MultipleNetworksCifar10: An error has occurred when running the "
"multiple networks inference tests: " << e.what() << std::endl;
return 1;