From cb8e350983fa3c7ad3c63538aa7573a61ad6af75 Mon Sep 17 00:00:00 2001
From: Keith Davis
Date: Thu, 12 Nov 2020 10:27:19 +0000
Subject: IVGCVSW-5465 ExecuteNetworkTestsDynamicBackends Bug Fix

* When an invalid backend is specified, ARMNN_LOG should be invoked to fail
  more gracefully

Signed-off-by: Keith Davis
Change-Id: Iec34fbc03dbeeb66836b0d5e1dd381d021a379b1
---
 tests/ExecuteNetwork/ExecuteNetworkParams.cpp | 154 ++++++++++++++------------
 1 file changed, 83 insertions(+), 71 deletions(-)

diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index ccf80648fe..a3a7f6a753 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -37,8 +37,8 @@ void CheckModelFormat(const std::string& modelFormat)
     {
 #if defined(ARMNN_SERIALIZER)
 #else
-        throw armnn::InvalidArgumentException("Can't run model in armnn format without a "
-                                               "built with serialization support.");
+        throw armnn::InvalidArgumentException("Can't run model in armnn format without a "
+                                              "built with serialization support.");
 #endif
     }
     else if (modelFormat.find("caffe") != std::string::npos)
@@ -65,13 +65,13 @@ void CheckModelFormat(const std::string& modelFormat)
                                               "built with Tensorflow parser support.");
 #endif
     }
-    else if(modelFormat.find("tflite") != std::string::npos)
+    else if (modelFormat.find("tflite") != std::string::npos)
     {
 #if defined(ARMNN_TF_LITE_PARSER)
         if (!IsModelBinary(modelFormat))
         {
-            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' format "
-                                                               "supported for tflite files",
+            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' "
+                                                               "format supported for tflite files",
                                                                modelFormat));
         }
 #else
@@ -93,10 +93,10 @@ void CheckClTuningParameter(const int& tuningLevel,
 {
     if (!tuningPath.empty())
     {
-        if(tuningLevel == 0)
+        if (tuningLevel == 0)
         {
             ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
-            if(!ValidatePath(tuningPath, true))
+            if (!ValidatePath(tuningPath, true))
             {
                 throw armnn::InvalidArgumentException("The tuning path is not valid");
             }
@@ -108,7 +108,8 @@ void CheckClTuningParameter(const int& tuningLevel,
     }
     else if ((0 < tuningLevel) || (tuningLevel > 3))
     {
-        throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.", tuningLevel));
+        throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
+                                              tuningLevel));
     }
 
     // Ensure that a GpuAcc is enabled. Otherwise no tuning data are used or genereted
@@ -120,93 +121,104 @@ void CheckClTuningParameter(const int& tuningLevel,
         }
     }
 
-
 }
 
 void ExecuteNetworkParams::ValidateParams()
 {
-    if (m_DynamicBackendsPath=="")
+    // Set to true if it is preferred to throw an exception rather than use ARMNN_LOG
+    bool throwExc = false;
+
+    try
     {
-        // Check compute devices are valid unless they are dynamically loaded at runtime
-        std::string invalidBackends;
-        if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
+        if (m_DynamicBackendsPath == "")
         {
-            throw armnn::InvalidArgumentException(
-                fmt::format("Some of the requested compute devices are invalid. "
-                            "\nInvalid devices: {} \nAvailable devices are: {}",
-                            invalidBackends,
-                            armnn::BackendRegistryInstance().GetBackendIdsAsString()));
+            // Check compute devices are valid unless they are dynamically loaded at runtime
+            std::string invalidBackends;
+            if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
+            {
+                ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
+                                 << invalidBackends;
+            }
         }
-    }
 
-    CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);
+        CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);
 
-    // Check turbo modes
-    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
-    {
-        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be enabled at the same time.");
-    }
+        if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
+        {
+            ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
+        }
 
-    m_IsModelBinary = IsModelBinary(m_ModelFormat);
+        m_IsModelBinary = IsModelBinary(m_ModelFormat);
 
-    CheckModelFormat(m_ModelFormat);
+        CheckModelFormat(m_ModelFormat);
 
-    // Check input tensor shapes
-    if ((m_InputTensorShapes.size() != 0) &&
-        (m_InputTensorShapes.size() != m_InputNames.size()))
-    {
-        throw armnn::InvalidArgumentException("input-name and input-tensor-shape must "
-                                              "have the same amount of elements.");
-    }
+        // Check input tensor shapes
+        if ((m_InputTensorShapes.size() != 0) &&
+            (m_InputTensorShapes.size() != m_InputNames.size()))
+        {
+            ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements. ";
+        }
 
-    if (m_InputTensorDataFilePaths.size() != 0)
-    {
-        if (!ValidatePaths(m_InputTensorDataFilePaths, true))
+        if (m_InputTensorDataFilePaths.size() != 0)
         {
-            throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
+            if (!ValidatePaths(m_InputTensorDataFilePaths, true))
+            {
+                ARMNN_LOG(fatal) << "One or more input data file paths are not valid. ";
+            }
+
+            if (m_InputTensorDataFilePaths.size() != m_InputNames.size())
+            {
+                ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements. ";
+            }
         }
 
-        if (m_InputTensorDataFilePaths.size() != m_InputNames.size())
+        if ((m_OutputTensorFiles.size() != 0) &&
+            (m_OutputTensorFiles.size() != m_OutputNames.size()))
         {
-            throw armnn::InvalidArgumentException("input-name and input-tensor-data must have "
-                                                  "the same amount of elements.");
+            ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements. ";
         }
-    }
 
-    if ((m_OutputTensorFiles.size() != 0) &&
-        (m_OutputTensorFiles.size() != m_OutputNames.size()))
-    {
-        throw armnn::InvalidArgumentException("output-name and write-outputs-to-file must have the "
-                                              "same amount of elements.");
-    }
+        if (m_InputTypes.size() == 0)
+        {
+            //Defaults the value of all inputs to "float"
+            m_InputTypes.assign(m_InputNames.size(), "float");
+        }
+        else if ((m_InputTypes.size() != 0) &&
+                 (m_InputTypes.size() != m_InputNames.size()))
+        {
+            ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
+        }
 
-    if (m_InputTypes.size() == 0)
-    {
-        //Defaults the value of all inputs to "float"
-        m_InputTypes.assign(m_InputNames.size(), "float");
-    }
-    else if ((m_InputTypes.size() != 0) &&
-             (m_InputTypes.size() != m_InputNames.size()))
-    {
-        throw armnn::InvalidArgumentException("input-name and input-type must have the same amount of elements.");
-    }
+        if (m_OutputTypes.size() == 0)
+        {
+            //Defaults the value of all outputs to "float"
+            m_OutputTypes.assign(m_OutputNames.size(), "float");
+        }
+        else if ((m_OutputTypes.size() != 0) &&
+                 (m_OutputTypes.size() != m_OutputNames.size()))
+        {
+            ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
+        }
 
-    if (m_OutputTypes.size() == 0)
-    {
-        //Defaults the value of all outputs to "float"
-        m_OutputTypes.assign(m_OutputNames.size(), "float");
-    }
-    else if ((m_OutputTypes.size() != 0) &&
-             (m_OutputTypes.size() != m_OutputNames.size()))
-    {
-        throw armnn::InvalidArgumentException("output-name and output-type must have the same amount of elements.");
+        // Check that threshold time is not less than zero
+        if (m_ThresholdTime < 0)
+        {
+            ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
+        }
     }
-
-    // Check that threshold time is not less than zero
-    if (m_ThresholdTime < 0)
+    catch (std::string& exc)
     {
-        throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
+        if (throwExc)
+        {
+            throw armnn::InvalidArgumentException(exc);
+        }
+        else
+        {
+            std::cout << exc;
+            exit(EXIT_FAILURE);
+        }
     }
+    // Check turbo modes
 
     // Warn if ExecuteNetwork will generate dummy input data
     if (m_GenerateTensorData)
--
cgit v1.2.1