From 291a16b28522c75e1f44cb37f6b61585320da215 Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Wed, 19 Aug 2020 22:54:00 +0100
Subject: IVGCVSW-5220 Set all operations to unsupported after encountering an
 unsupported operation with dynamic inputs

Change-Id: Ia30698d608810dabb419d7d326ad985789d50603
Signed-off-by: Finn Williams
---
 ConversionUtils.hpp          |  6 +++++-
 ModelToINetworkConverter.cpp | 26 +++++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 27d07200..830502df 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -44,12 +44,14 @@ struct ConversionData
     ConversionData(const std::vector<armnn::BackendId>& backends)
     : m_Backends(backends)
     , m_Network(nullptr, nullptr)
+    , m_DynamicInputsEncountered(false)
     {}
 
     const std::vector<armnn::BackendId> m_Backends;
     armnn::INetworkPtr m_Network;
     std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
     std::vector<android::nn::RunTimePoolInfo> m_MemPools;
+    bool m_DynamicInputsEncountered;
 };
 
 class LayerInputHandle
@@ -1290,11 +1292,13 @@ LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetw
 
     if (IsDynamicTensor(operandTensorInfo))
     {
+        data.m_DynamicInputsEncountered = true;
+
         const uint32_t operandIndex = operation.inputs[inputIndex];
 
         // Check if the dynamic input tensors have been inferred by one of the previous layers
         // If not we can't support them
-        if(data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
+        if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
         {
             operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
         }
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 0d6ddc34..e8cf8a8b 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -110,7 +110,7 @@ void ModelToINetworkConverter::Convert()
             Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
             m_ConversionResult = ConversionResult::UnsupportedFeature;
         }
-
+    bool UnsupportedDynamicOperation = false;
     for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationIdx++)
     {
         const auto& operation = getMainModel(m_Model).operations[operationIdx];
@@ -147,9 +147,33 @@ void ModelToINetworkConverter::Convert()
         // We still need to continue and check the other ones.
         if (!ok)
         {
+            if (m_Data.m_DynamicInputsEncountered)
+            {
+                Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
+                UnsupportedDynamicOperation = true;
+            }
+
             m_ConversionResult = ConversionResult::UnsupportedFeature;
         }
+        m_Data.m_DynamicInputsEncountered = false;
     }
+
+    // Due to the NNAPI partitioner not supporting partition boundaries of unknown size,
+    // any operations whose outputs connect to an unsupported operation with dynamic inputs
+    // will cause a failure.
+
+    // The simplest solution to this problem is to not support any operations in a model containing
+    // an unsupported operation with dynamic inputs.
+    if (UnsupportedDynamicOperation)
+    {
+        Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
+             __func__);
+        for (auto& operation : m_OperationSupported)
+        {
+            operation.second = false;
+        }
+    }
+
     try
     {
         if (m_ConversionResult == ConversionResult::Success)
--
cgit v1.2.1
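
Note on the change: the sketch below illustrates the idea behind the patch in isolation. It is a minimal, hypothetical example, not the driver's real API (OperationSupportMap, MarkAllUnsupported and the main() harness are illustrative names): the converter records per-operation support results, and once any unsupported operation with dynamic inputs has been seen, every entry is cleared so the NNAPI runtime treats the whole model as unsupported and falls back, instead of creating a partition boundary whose tensor size is unknown.

// Minimal, self-contained sketch (hypothetical names, not the driver's API).
#include <cstdint>
#include <iostream>
#include <map>

// Stand-in for a per-operation support table such as m_OperationSupported:
// operation index -> "can this driver run the operation?".
using OperationSupportMap = std::map<uint32_t, bool>;

// Mirrors the new behaviour: if an unsupported operation with dynamic inputs
// was encountered, retroactively mark every operation unsupported so the
// runtime falls back for the whole model rather than partitioning it around
// a boundary tensor of unknown size.
void MarkAllUnsupported(OperationSupportMap& supported, bool unsupportedDynamicOperation)
{
    if (!unsupportedDynamicOperation)
    {
        return; // keep the per-operation results as they are
    }

    for (auto& entry : supported)
    {
        entry.second = false;
    }
}

int main()
{
    // Suppose operation 2 was rejected and had dynamic inputs.
    OperationSupportMap supported = { {0, true}, {1, true}, {2, false}, {3, true} };
    bool unsupportedDynamicOperation = true;

    MarkAllUnsupported(supported, unsupportedDynamicOperation);

    for (const auto& entry : supported)
    {
        std::cout << "operation " << entry.first << ": "
                  << (entry.second ? "supported" : "unsupported") << "\n";
    }
    return 0;
}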