author    Matteo Martincigh <matteo.martincigh@arm.com>  2019-08-14 11:42:30 +0100
committer Matteo Martincigh <matteo.martincigh@arm.com>  2019-08-23 12:44:27 +0000
commit    00dda4a66c10a56b02bdd534ba3b5fdb27527ebc (patch)
tree      cb752587ba5c2ea64e2de6ec402cbce7fc63981d
parent    e898db9aaf07b4d0ea0242a1f3296f0192c42939 (diff)
download  armnn-00dda4a66c10a56b02bdd534ba3b5fdb27527ebc.tar.gz
IVGCVSW-3547 Use ExecuteNetwork to run a dynamic backend end to end test
* Added path override for dynamic backend loading
* Do not default to CpuRef, as there could be dynamic backends loaded at runtime
* Do not check right away whether the backends are correct, as more of them can
  be loaded at runtime as dynamic backends

Change-Id: If23f79aa1480b8dfce57e49b1746c23b6b9e6f82
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
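The sketch below (not part of the patch) illustrates how the new path is consumed, mirroring the InferenceModel change in the diff below: ExecuteNetwork forwards the value of the new --dynamic-backends-path (-b) option into armnn::IRuntime::CreationOptions before creating the runtime, so any backends found under that path are registered alongside the statically built ones, which is also why the -c backend list is no longer validated up front. The directory used here is purely illustrative, and the GetDeviceSpec()/GetSupportedBackends() calls used to list the registered backends are an assumption about the public runtime API, not something this patch touches.

    // Minimal sketch, assuming the armnn::IRuntime API of this release.
    #include <armnn/ArmNN.hpp>
    #include <iostream>

    int main()
    {
        armnn::IRuntime::CreationOptions options;
        // Hypothetical directory containing dynamic backend shared objects.
        options.m_DynamicBackendsPath = "/usr/local/lib/armnn/dynamic-backends";

        // Dynamic backends found under the path are loaded and registered here,
        // alongside the backends compiled into the library.
        armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

        // List every backend the runtime ended up with (assumed API, for illustration).
        for (const armnn::BackendId& id : runtime->GetDeviceSpec().GetSupportedBackends())
        {
            std::cout << id.Get() << std::endl;
        }
        return 0;
    }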
-rw-r--r--  tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp                      4
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp                       24
-rw-r--r--  tests/InferenceModel.hpp                                      16
-rw-r--r--  tests/InferenceTest.hpp                                        6
-rw-r--r--  tests/InferenceTest.inl                                        4
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp         19
-rw-r--r--  tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp    4

7 files changed, 50 insertions, 27 deletions
diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
index c136672c48..d563faaab2 100644
--- a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
+++ b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
@@ -40,7 +40,9 @@ int main(int argc, char* argv[])
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
- return std::make_unique<YoloInferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+ return std::make_unique<YoloInferenceModel>(modelParams,
+ commonOptions.m_EnableProfiling,
+ commonOptions.m_DynamicBackendsPath);
});
});
}
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index a8f3b3d71d..1a0306244b 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -27,6 +27,7 @@ int main(int argc, const char* argv[])
std::string outputNames;
std::string inputTypes;
std::string outputTypes;
+ std::string dynamicBackendsPath;
double thresholdTime = 0.0;
@@ -52,6 +53,9 @@ int main(int argc, const char* argv[])
".prototxt, .tflite, .onnx")
("compute,c", po::value<std::vector<std::string>>()->multitoken(),
backendsMessage.c_str())
+ ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
+ "Path where to load any available dynamic backend from. "
+ "If left empty (the default), dynamic backends will not be used.")
("input-name,i", po::value(&inputNames),
"Identifier of the input tensors in the network separated by comma.")
("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
@@ -62,7 +66,7 @@ int main(int argc, const char* argv[])
"This parameter is optional, depending on the network.")
("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
"Path to files containing the input data as a flat array separated by whitespace. "
- "Several paths can be passed separating them by comma. ")
+ "Several paths can be passed separating them by comma.")
("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined inputs. "
"Accepted values (float, int or qasymm8)")
@@ -196,23 +200,15 @@ int main(int argc, const char* argv[])
{
// Get the preferred order of compute devices. If none are specified, default to using CpuRef
const std::string computeOption("compute");
- std::vector<std::string> computeDevicesAsStrings = CheckOption(vm, computeOption.c_str()) ?
- vm[computeOption].as<std::vector<std::string>>() :
- std::vector<std::string>({ "CpuRef" });
+ std::vector<std::string> computeDevicesAsStrings =
+ CheckOption(vm, computeOption.c_str()) ?
+ vm[computeOption].as<std::vector<std::string>>() :
+ std::vector<std::string>();
std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
// Remove duplicates from the list of compute devices.
RemoveDuplicateDevices(computeDevices);
- // Check that the specified compute devices are valid.
- std::string invalidBackends;
- if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
- {
- BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: "
- << invalidBackends;
- return EXIT_FAILURE;
- }
-
try
{
CheckOptionDependencies(vm);
@@ -224,7 +220,7 @@ int main(int argc, const char* argv[])
return EXIT_FAILURE;
}
- return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+ return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
}
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 4ad5872057..13e80319f4 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -86,13 +86,14 @@ struct Params
std::vector<armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_OutputBindings;
std::vector<armnn::BackendId> m_ComputeDevices;
+ std::string m_DynamicBackendsPath;
size_t m_SubgraphId;
bool m_IsModelBinary;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
Params()
- : m_ComputeDevices{"CpuRef"}
+ : m_ComputeDevices{}
, m_SubgraphId(0)
, m_IsModelBinary(true)
, m_VisualizePostOptimizationModel(false)
@@ -318,6 +319,7 @@ public:
{
std::string m_ModelDir;
std::vector<std::string> m_ComputeDevices;
+ std::string m_DynamicBackendsPath;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
std::string m_Labels;
@@ -345,6 +347,9 @@ public:
("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
multitoken(), backendsMessage.c_str())
+ ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
+ "Path where to load any available dynamic backend from. "
+ "If left empty (the default), dynamic backends will not be used.")
("labels,l", po::value<std::string>(&options.m_Labels),
"Text file containing one image filename - correct label pair per line, "
"used to test the accuracy of the network.")
@@ -359,8 +364,10 @@ public:
InferenceModel(const Params& params,
bool enableProfiling,
+ const std::string& dynamicBackendsPath,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
: m_EnableProfiling(enableProfiling)
+ , m_DynamicBackendsPath(dynamicBackendsPath)
{
if (runtime)
{
@@ -370,6 +377,7 @@ public:
{
armnn::IRuntime::CreationOptions options;
options.m_EnableGpuProfiling = m_EnableProfiling;
+ options.m_DynamicBackendsPath = m_DynamicBackendsPath;
m_Runtime = std::move(armnn::IRuntime::Create(options));
}
@@ -379,10 +387,9 @@ public:
throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
}
- armnn::INetworkPtr network =
- CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
+ armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
- armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
+ armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
{
ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
@@ -544,6 +551,7 @@ private:
std::vector<armnn::BindingPointInfo> m_InputBindings;
std::vector<armnn::BindingPointInfo> m_OutputBindings;
bool m_EnableProfiling;
+ std::string m_DynamicBackendsPath;
template<typename TContainer>
armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 40c9e5e597..f2b8c634cc 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -58,10 +58,12 @@ struct InferenceTestOptions
unsigned int m_IterationCount;
std::string m_InferenceTimesFile;
bool m_EnableProfiling;
+ std::string m_DynamicBackendsPath;
InferenceTestOptions()
- : m_IterationCount(0),
- m_EnableProfiling(0)
+ : m_IterationCount(0)
+ , m_EnableProfiling(0)
+ , m_DynamicBackendsPath()
{}
};
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 04cae99132..c91193f187 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -397,7 +397,9 @@ int ClassifierInferenceTestMain(int argc,
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
- return std::make_unique<InferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+ return std::make_unique<InferenceModel>(modelParams,
+ commonOptions.m_EnableProfiling,
+ commonOptions.m_DynamicBackendsPath);
});
});
}
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 440dcf9aa8..810f499a9c 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -254,6 +254,7 @@ template<typename TParser, typename TDataType>
int MainImpl(const char* modelPath,
bool isModelBinary,
const std::vector<armnn::BackendId>& computeDevices,
+ const std::string& dynamicBackendsPath,
const std::vector<string>& inputNames,
const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
const std::vector<string>& inputTensorDataFilePaths,
@@ -278,6 +279,7 @@ int MainImpl(const char* modelPath,
params.m_ModelPath = modelPath;
params.m_IsModelBinary = isModelBinary;
params.m_ComputeDevices = computeDevices;
+ params.m_DynamicBackendsPath = dynamicBackendsPath;
for(const std::string& inputName: inputNames)
{
@@ -296,7 +298,7 @@ int MainImpl(const char* modelPath,
params.m_SubgraphId = subgraphId;
params.m_EnableFp16TurboMode = enableFp16TurboMode;
- InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+ InferenceModel<TParser, TDataType> model(params, enableProfiling, dynamicBackendsPath, runtime);
for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
{
@@ -407,6 +409,7 @@ int MainImpl(const char* modelPath,
int RunTest(const std::string& format,
const std::string& inputTensorShapesStr,
const vector<armnn::BackendId>& computeDevice,
+ const std::string& dynamicBackendsPath,
const std::string& path,
const std::string& inputNames,
const std::string& inputTensorDataFilePaths,
@@ -513,7 +516,7 @@ int RunTest(const std::string& format,
#if defined(ARMNN_SERIALIZER)
return MainImpl<armnnDeserializer::IDeserializer, float>(
modelPath.c_str(), isModelBinary, computeDevice,
- inputNamesVector, inputTensorShapes,
+ dynamicBackendsPath, inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
outputTypesVector, outputNamesVector, enableProfiling,
enableFp16TurboMode, thresholdTime, subgraphId, runtime);
@@ -526,6 +529,7 @@ int RunTest(const std::string& format,
{
#if defined(ARMNN_CAFFE_PARSER)
return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
@@ -540,6 +544,7 @@ int RunTest(const std::string& format,
{
#if defined(ARMNN_ONNX_PARSER)
return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
@@ -554,6 +559,7 @@ int RunTest(const std::string& format,
{
#if defined(ARMNN_TF_PARSER)
return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
@@ -574,6 +580,7 @@ int RunTest(const std::string& format,
return EXIT_FAILURE;
}
return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
@@ -604,6 +611,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
std::string outputNames;
std::string inputTypes;
std::string outputTypes;
+ std::string dynamicBackendsPath;
size_t subgraphId = 0;
@@ -622,6 +630,9 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
".tflite, .onnx")
("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
backendsMessage.c_str())
+ ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
+ "Path where to load any available dynamic backend from. "
+ "If left empty (the default), dynamic backends will not be used.")
("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
"executed. Defaults to 0.")
@@ -696,7 +707,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
return EXIT_FAILURE;
}
- return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+ return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
-} \ No newline at end of file
+}
diff --git a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
index 06196475bf..787102ed45 100644
--- a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
+++ b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
@@ -64,7 +64,9 @@ int main(int argc, char* argv[])
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
- return std::make_unique<Model>(modelParams, commonOptions.m_EnableProfiling);
+ return std::make_unique<Model>(modelParams,
+ commonOptions.m_EnableProfiling,
+ commonOptions.m_DynamicBackendsPath);
});
});
}