From 00dda4a66c10a56b02bdd534ba3b5fdb27527ebc Mon Sep 17 00:00:00 2001
From: Matteo Martincigh
Date: Wed, 14 Aug 2019 11:42:30 +0100
Subject: IVGCVSW-3547 Use ExecuteNetwork to run a dynamic backend end to end test

* Added a path override for dynamic backend loading
* Do not default to CpuRef anymore, as there could be dynamic backends
  loaded at runtime
* Do not check right away whether the backends are valid, as more of them
  can be loaded at runtime as dynamic backends (see the sketch in the
  notes below)

Change-Id: If23f79aa1480b8dfce57e49b1746c23b6b9e6f82
Signed-off-by: Matteo Martincigh
---
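
Note (illustrative only, not part of the change): the new
--dynamic-backends-path option ("-b") is forwarded into
armnn::IRuntime::CreationOptions when the runtime is created, which is the
point where dynamic backends are loaded and registered. A minimal sketch of
that flow; the helper function and the directory name are made-up examples,
while the CreationOptions fields are the ones this patch sets:

    #include <armnn/ArmNN.hpp>

    #include <string>

    // Create a runtime that scans the given directory for dynamic backends.
    // This mirrors the InferenceModel constructor change in this patch.
    armnn::IRuntimePtr CreateRuntime(const std::string& dynamicBackendsPath)
    {
        armnn::IRuntime::CreationOptions options;
        options.m_DynamicBackendsPath = dynamicBackendsPath; // e.g. "/tmp/armnn-backends"
        return armnn::IRuntime::Create(options);
    }

Because backends under that path only become known once the runtime exists,
the preferred compute devices can no longer be validated, or defaulted to
CpuRef, before runtime creation; hence the removal of the up-front check in
ExecuteNetwork.cpp below.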
 tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp          |  4 +++-
 tests/ExecuteNetwork/ExecuteNetwork.cpp            | 24 +++++++++-------------
 tests/InferenceModel.hpp                           | 16 +++++++++++----
 tests/InferenceTest.hpp                            |  6 ++++--
 tests/InferenceTest.inl                            |  4 +++-
 .../NetworkExecutionUtils.hpp                      | 19 +++++++++++++----
 .../TfLiteMobileNetSsd-Armnn.cpp                   |  4 +++-
 7 files changed, 50 insertions(+), 27 deletions(-)

diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
index c136672c48..d563faaab2 100644
--- a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
+++ b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
@@ -40,7 +40,9 @@ int main(int argc, char* argv[])
                     modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
                     modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
-                    return std::make_unique<YoloInferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+                    return std::make_unique<YoloInferenceModel>(modelParams,
+                                                                commonOptions.m_EnableProfiling,
+                                                                commonOptions.m_DynamicBackendsPath);
                 });
         });
 }
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index a8f3b3d71d..1a0306244b 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -27,6 +27,7 @@ int main(int argc, const char* argv[])
     std::string outputNames;
     std::string inputTypes;
     std::string outputTypes;
+    std::string dynamicBackendsPath;
 
     double thresholdTime = 0.0;
 
@@ -52,6 +53,9 @@ int main(int argc, const char* argv[])
          ".prototxt, .tflite, .onnx")
         ("compute,c", po::value<std::vector<std::string>>()->multitoken(),
          backendsMessage.c_str())
+        ("dynamic-backends-path,b", po::value<std::string>(&dynamicBackendsPath),
+         "Path where to load any available dynamic backend from. "
+         "If left empty (the default), dynamic backends will not be used.")
         ("input-name,i", po::value<std::string>(&inputNames),
          "Identifier of the input tensors in the network separated by comma.")
         ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
@@ -62,7 +66,7 @@
          "This parameter is optional, depending on the network.")
         ("input-tensor-data,d", po::value<std::string>(&inputTensorDataFilePaths),
          "Path to files containing the input data as a flat array separated by whitespace. "
-         "Several paths can be passed separating them by comma. ")
+         "Several paths can be passed separating them by comma.")
         ("input-type,y",po::value<std::string>(&inputTypes), "The type of the input tensors in the network separated by comma. "
          "If unset, defaults to \"float\" for all defined inputs. "
          "Accepted values (float, int or qasymm8)")
@@ -196,23 +200,15 @@
     {
         // Get the preferred order of compute devices. If none are specified, default to using CpuRef
        const std::string computeOption("compute");
-        std::vector<std::string> computeDevicesAsStrings = CheckOption(vm, computeOption.c_str()) ?
-            vm[computeOption].as<std::vector<std::string>>() :
-            std::vector<std::string>({ "CpuRef" });
+        std::vector<std::string> computeDevicesAsStrings =
+                CheckOption(vm, computeOption.c_str()) ?
+                vm[computeOption].as<std::vector<std::string>>() :
+                std::vector<std::string>();
         std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
 
         // Remove duplicates from the list of compute devices.
         RemoveDuplicateDevices(computeDevices);
 
-        // Check that the specified compute devices are valid.
-        std::string invalidBackends;
-        if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
-        {
-            BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: "
-                                     << invalidBackends;
-            return EXIT_FAILURE;
-        }
-
         try
         {
             CheckOptionDependencies(vm);
@@ -224,7 +220,7 @@
             return EXIT_FAILURE;
         }
 
-        return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+        return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                        inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                        enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
     }
" + "If left empty (the default), dynamic backends will not be used.") ("labels,l", po::value(&options.m_Labels), "Text file containing one image filename - correct label pair per line, " "used to test the accuracy of the network.") @@ -359,8 +364,10 @@ public: InferenceModel(const Params& params, bool enableProfiling, + const std::string& dynamicBackendsPath, const std::shared_ptr& runtime = nullptr) : m_EnableProfiling(enableProfiling) + , m_DynamicBackendsPath(dynamicBackendsPath) { if (runtime) { @@ -370,6 +377,7 @@ public: { armnn::IRuntime::CreationOptions options; options.m_EnableGpuProfiling = m_EnableProfiling; + options.m_DynamicBackendsPath = m_DynamicBackendsPath; m_Runtime = std::move(armnn::IRuntime::Create(options)); } @@ -379,10 +387,9 @@ public: throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends); } - armnn::INetworkPtr network = - CreateNetworkImpl::Create(params, m_InputBindings, m_OutputBindings); + armnn::INetworkPtr network = CreateNetworkImpl::Create(params, m_InputBindings, m_OutputBindings); - armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}}; + armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}}; { ARMNN_SCOPED_HEAP_PROFILING("Optimizing"); @@ -544,6 +551,7 @@ private: std::vector m_InputBindings; std::vector m_OutputBindings; bool m_EnableProfiling; + std::string m_DynamicBackendsPath; template armnn::InputTensors MakeInputTensors(const std::vector& inputDataContainers) diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp index 40c9e5e597..f2b8c634cc 100644 --- a/tests/InferenceTest.hpp +++ b/tests/InferenceTest.hpp @@ -58,10 +58,12 @@ struct InferenceTestOptions unsigned int m_IterationCount; std::string m_InferenceTimesFile; bool m_EnableProfiling; + std::string m_DynamicBackendsPath; InferenceTestOptions() - : m_IterationCount(0), - m_EnableProfiling(0) + : m_IterationCount(0) + , m_EnableProfiling(0) + , m_DynamicBackendsPath() {} }; diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl index 04cae99132..c91193f187 100644 --- a/tests/InferenceTest.inl +++ b/tests/InferenceTest.inl @@ -397,7 +397,9 @@ int ClassifierInferenceTestMain(int argc, modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel; modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode; - return std::make_unique(modelParams, commonOptions.m_EnableProfiling); + return std::make_unique(modelParams, + commonOptions.m_EnableProfiling, + commonOptions.m_DynamicBackendsPath); }); }); } diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp index 440dcf9aa8..810f499a9c 100644 --- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp +++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp @@ -254,6 +254,7 @@ template int MainImpl(const char* modelPath, bool isModelBinary, const std::vector& computeDevices, + const std::string& dynamicBackendsPath, const std::vector& inputNames, const std::vector>& inputTensorShapes, const std::vector& inputTensorDataFilePaths, @@ -278,6 +279,7 @@ int MainImpl(const char* modelPath, params.m_ModelPath = modelPath; params.m_IsModelBinary = isModelBinary; params.m_ComputeDevices = computeDevices; + params.m_DynamicBackendsPath = dynamicBackendsPath; for(const std::string& inputName: inputNames) { @@ -296,7 +298,7 @@ int MainImpl(const char* modelPath, params.m_SubgraphId = subgraphId; params.m_EnableFp16TurboMode = enableFp16TurboMode; - InferenceModel 
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 440dcf9aa8..810f499a9c 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -254,6 +254,7 @@ template<typename TParser, typename TDataType>
 int MainImpl(const char* modelPath,
              bool isModelBinary,
             const std::vector<armnn::BackendId>& computeDevices,
+             const std::string& dynamicBackendsPath,
             const std::vector<std::string>& inputNames,
             const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
             const std::vector<std::string>& inputTensorDataFilePaths,
@@ -278,6 +279,7 @@
         params.m_ModelPath = modelPath;
         params.m_IsModelBinary = isModelBinary;
         params.m_ComputeDevices = computeDevices;
+        params.m_DynamicBackendsPath = dynamicBackendsPath;
 
         for(const std::string& inputName: inputNames)
         {
@@ -296,7 +298,7 @@
         params.m_SubgraphId = subgraphId;
         params.m_EnableFp16TurboMode = enableFp16TurboMode;
 
-        InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+        InferenceModel<TParser, TDataType> model(params, enableProfiling, dynamicBackendsPath, runtime);
 
         for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
         {
@@ -407,6 +409,7 @@
 int RunTest(const std::string& format,
             const std::string& inputTensorShapesStr,
             const vector<armnn::BackendId>& computeDevice,
+            const std::string& dynamicBackendsPath,
             const std::string& path,
             const std::string& inputNames,
             const std::string& inputTensorDataFilePaths,
@@ -513,7 +516,7 @@
 #if defined(ARMNN_SERIALIZER)
     return MainImpl<armnnDeserializer::IDeserializer, float>(
         modelPath.c_str(), isModelBinary, computeDevice,
-        inputNamesVector, inputTensorShapes,
+        dynamicBackendsPath, inputNamesVector, inputTensorShapes,
         inputTensorDataFilePathsVector, inputTypesVector,
         quantizeInput, outputTypesVector, outputNamesVector,
         enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId, runtime);
@@ -526,6 +529,7 @@
 #if defined(ARMNN_CAFFE_PARSER)
     return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                           dynamicBackendsPath,
                                                            inputNamesVector, inputTensorShapes,
                                                            inputTensorDataFilePathsVector, inputTypesVector,
                                                            quantizeInput, outputTypesVector, outputNamesVector,
@@ -540,6 +544,7 @@
 #if defined(ARMNN_ONNX_PARSER)
     return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                         dynamicBackendsPath,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
@@ -554,6 +559,7 @@
 #if defined(ARMNN_TF_PARSER)
     return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                     dynamicBackendsPath,
                                                      inputNamesVector, inputTensorShapes,
                                                      inputTensorDataFilePathsVector, inputTypesVector,
                                                      quantizeInput, outputTypesVector, outputNamesVector,
@@ -574,6 +580,7 @@
         return EXIT_FAILURE;
     }
     return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                             dynamicBackendsPath,
                                                              inputNamesVector, inputTensorShapes,
                                                              inputTensorDataFilePathsVector, inputTypesVector,
                                                              quantizeInput, outputTypesVector, outputNamesVector,
@@ -604,6 +611,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
     std::string outputNames;
     std::string inputTypes;
     std::string outputTypes;
+    std::string dynamicBackendsPath;
 
     double thresholdTime = 0.0;
 
@@ -629,6 +637,9 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
         ("compute,c", po::value<std::vector<std::string>>()->multitoken(), backendsMessage.c_str())
+        ("dynamic-backends-path,b", po::value<std::string>(&dynamicBackendsPath),
+         "Path where to load any available dynamic backend from. "
+         "If left empty (the default), dynamic backends will not be used.")
         ("input-name,i", po::value<std::string>(&inputNames),
          "Identifier of the input tensors in the network separated by comma.")
         ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
          "executed. Defaults to 0.")
@@ -696,7 +707,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
         return EXIT_FAILURE;
     }
 
-    return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+    return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                    inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                    enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
 }
diff --git a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
--- a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
+++ b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
@@ -44,7 +44,9 @@ int main(int argc, char* argv[])
             modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
             modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
-            return std::make_unique<Model>(modelParams, commonOptions.m_EnableProfiling);
+            return std::make_unique<Model>(modelParams,
+                                           commonOptions.m_EnableProfiling,
+                                           commonOptions.m_DynamicBackendsPath);
         });
     });
 }
-- 
cgit v1.2.1
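
Appendix (editorial sketch, not part of the patch above): why the validation
had to move. Dynamic backends are registered only when the runtime is
created, so a check performed before that point would reject them. The
program below illustrates the ordering; it assumes a dynamic backend shared
object has been placed in a made-up directory and registers under an ID such
as "SampleDynamic":

    #include <armnn/ArmNN.hpp>
    #include <armnn/BackendRegistry.hpp>

    #include <iostream>

    int main()
    {
        armnn::IRuntime::CreationOptions options;
        options.m_DynamicBackendsPath = "/tmp/armnn-backends"; // made-up path

        // Creating the runtime scans the path and registers any dynamic
        // backends it finds alongside the statically linked ones.
        armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

        // Only now does the backend registry contain the dynamically loaded
        // IDs, so this is the earliest point at which a requested backend
        // list can be checked; printing the IDs shows what was picked up.
        for (const armnn::BackendId& backendId : armnn::BackendRegistryInstance().GetBackendIds())
        {
            std::cout << backendId << std::endl;
        }

        return 0;
    }

The equivalent manual test is to point ExecuteNetwork at the same directory,
e.g. "--dynamic-backends-path /tmp/armnn-backends --compute SampleDynamic",
which is the end to end scenario this patch enables.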