author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2019-01-31 16:44:26 +0000
committer  Matteo Martincigh <matteo.martincigh@arm.com>   2019-02-01 09:10:51 +0000
commit     339bcae73515c66899432b5844d7c239c570c4b8 (patch)
tree       91bea3fc1eda5cf96309ad2a255917667ffad679 /tests
parent     0c051f9b6b8d7d1602e81d2977dda449b6392642 (diff)
download   armnn-339bcae73515c66899432b5844d7c239c570c4b8.tar.gz
IVGCVSW-2604 Fix bug that made it impossible to execute inference tests on certain backends
* Read compute devices from the command line as strings and convert them into BackendId objects afterwards

Change-Id: Icded1c572778f5a213644e3052ff6dfe7022128b
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
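The conversion described above works because armnn::BackendId is implicitly constructible from std::string, which is what the new GetComputeDevicesAsBackendIds() helper in InferenceModel.hpp relies on (the patch itself initialises a std::vector<armnn::BackendId> from the string literal "CpuRef"). Below is a minimal, self-contained sketch of that element-wise conversion; the free function name ToBackendIds is illustrative and does not appear in the patch.

#include <armnn/BackendId.hpp>

#include <algorithm>
#include <iterator>
#include <string>
#include <vector>

// Convert each backend name to an armnn::BackendId; std::back_inserter
// invokes BackendId's converting constructor for every element.
std::vector<armnn::BackendId> ToBackendIds(const std::vector<std::string>& computeDevices)
{
    std::vector<armnn::BackendId> backendIds;
    backendIds.reserve(computeDevices.size());
    std::copy(computeDevices.begin(), computeDevices.end(), std::back_inserter(backendIds));
    return backendIds;
}
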
Diffstat (limited to 'tests')
-rw-r--r--  tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp                    |  2
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                      |  4
-rw-r--r--  tests/InferenceModel.hpp                                     | 27
-rw-r--r--  tests/InferenceTest.inl                                      |  2
-rw-r--r--  tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp  |  2
5 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
index b752c7c98e..c6ffe3d989 100644
--- a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
+++ b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
@@ -35,7 +35,7 @@ int main(int argc, char* argv[])
modelParams.m_OutputBindings = { "fc12" };
modelParams.m_InputShapes = { inputTensorShape };
modelParams.m_IsModelBinary = true;
- modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
+ modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 29780104c2..afde9860e2 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -171,7 +171,7 @@ void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
template<typename TParser, typename TDataType>
int MainImpl(const char* modelPath,
bool isModelBinary,
- const std::vector<armnn::BackendId>& computeDevice,
+ const std::vector<armnn::BackendId>& computeDevices,
const char* inputName,
const armnn::TensorShape* inputTensorShape,
const char* inputTensorDataFilePath,
@@ -200,7 +200,7 @@ int MainImpl(const char* modelPath,
typename InferenceModel<TParser, TDataType>::Params params;
params.m_ModelPath = modelPath;
params.m_IsModelBinary = isModelBinary;
- params.m_ComputeDevice = computeDevice;
+ params.m_ComputeDevices = computeDevices;
params.m_InputBindings = { inputName };
params.m_InputShapes = { *inputTensorShape };
params.m_OutputBindings = { outputName };
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 1c89238d32..7e338669c7 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -16,6 +16,7 @@
#include <backendsCommon/BackendRegistry.hpp>
+#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
@@ -24,6 +25,8 @@
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
+#include <algorithm>
+#include <iterator>
#include <fstream>
#include <map>
#include <string>
@@ -78,7 +81,7 @@ struct Params
std::vector<std::string> m_InputBindings;
std::vector<armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_OutputBindings;
- std::vector<armnn::BackendId> m_ComputeDevice;
+ std::vector<armnn::BackendId> m_ComputeDevices;
bool m_EnableProfiling;
size_t m_SubgraphId;
bool m_IsModelBinary;
@@ -86,7 +89,7 @@ struct Params
bool m_EnableFp16TurboMode;
Params()
- : m_ComputeDevice{armnn::Compute::CpuRef}
+ : m_ComputeDevices{"CpuRef"}
, m_EnableProfiling(false)
, m_SubgraphId(0)
, m_IsModelBinary(true)
@@ -319,16 +322,23 @@ public:
struct CommandLineOptions
{
std::string m_ModelDir;
- std::vector<armnn::BackendId> m_ComputeDevice;
+ std::vector<std::string> m_ComputeDevices;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
+
+ std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
+ {
+ std::vector<armnn::BackendId> backendIds;
+ std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
+ return backendIds;
+ }
};
static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
{
namespace po = boost::program_options;
- std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
+ const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };
const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
+ armnn::BackendRegistryInstance().GetBackendIdsAsString();
@@ -336,8 +346,9 @@ public:
desc.add_options()
("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
"Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
- ("compute,c", po::value<std::vector<armnn::BackendId>>(&options.m_ComputeDevice)->default_value
- (defaultBackends), backendsMessage.c_str())
+ ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
+ default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
+ multitoken(), backendsMessage.c_str())
("visualize-optimized-model,v",
po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
"Produce a dot file useful for visualizing the graph post optimization."
@@ -362,7 +373,7 @@ public:
}
std::string invalidBackends;
- if (!CheckRequestedBackendsAreValid(params.m_ComputeDevice, armnn::Optional<std::string&>(invalidBackends)))
+ if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
{
throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
}
@@ -377,7 +388,7 @@ public:
armnn::OptimizerOptions options;
options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
- optNet = armnn::Optimize(*network, params.m_ComputeDevice, m_Runtime->GetDeviceSpec(), options);
+ optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
if (!optNet)
{
throw armnn::Exception("Optimize returned nullptr");
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 4dde35403d..07a20d5a13 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -342,7 +342,7 @@ int ClassifierInferenceTestMain(int argc,
}
modelParams.m_IsModelBinary = isModelBinary;
- modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
+ modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
diff --git a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
index b1bc0f6120..3328339318 100644
--- a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
+++ b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
@@ -59,7 +59,7 @@ int main(int argc, char* argv[])
modelParams.m_InputShapes = { inputTensorShape };
modelParams.m_IsModelBinary = true;
- modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
+ modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
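
For reference, the following is a minimal, self-contained sketch (not part of this commit) of how the multitoken "compute,c" option added to InferenceModel.hpp behaves under boost::program_options. The option name and default backends mirror the patch; the surrounding main() is purely illustrative.

#include <boost/algorithm/string/join.hpp>
#include <boost/program_options.hpp>

#include <iostream>
#include <string>
#include <vector>

int main(int argc, char* argv[])
{
    namespace po = boost::program_options;

    std::vector<std::string> computeDevices;
    const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

    po::options_description desc("Options");
    desc.add_options()
        ("compute,c",
         po::value<std::vector<std::string>>(&computeDevices)
             ->default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))
             ->multitoken(),
         "Backends to run layers on, e.g. -c CpuAcc GpuAcc");

    po::variables_map vm;
    po::store(po::parse_command_line(argc, argv, desc), vm);
    po::notify(vm);

    // Each token after -c/--compute becomes one entry in computeDevices;
    // these strings are later validated and converted to BackendId objects.
    for (const auto& device : computeDevices)
    {
        std::cout << device << std::endl;
    }
    return 0;
}

With this parsing in place, an invocation such as "-c CpuAcc GpuAcc" yields two string entries, which InferenceModel validates against the backend registry (CheckRequestedBackendsAreValid) before converting them to BackendId objects and passing them to armnn::Optimize.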