diff options
-rw-r--r-- | ConversionUtils.hpp | 6 | ||||
-rw-r--r-- | ModelToINetworkConverter.cpp | 26 |
2 files changed, 30 insertions, 2 deletions
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index 27d07200..830502df 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -44,12 +44,14 @@ struct ConversionData ConversionData(const std::vector<armnn::BackendId>& backends) : m_Backends(backends) , m_Network(nullptr, nullptr) + , m_DynamicInputsEncountered(false) {} const std::vector<armnn::BackendId> m_Backends; armnn::INetworkPtr m_Network; std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand; std::vector<android::nn::RunTimePoolInfo> m_MemPools; + bool m_DynamicInputsEncountered; }; class LayerInputHandle @@ -1290,11 +1292,13 @@ LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetw if (IsDynamicTensor(operandTensorInfo)) { + data.m_DynamicInputsEncountered = true; + const uint32_t operandIndex = operation.inputs[inputIndex]; // Check if the dynamic input tensors have been inferred by one of the previous layers // If not we can't support them - if(data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex]) + if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex]) { operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo(); } diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp index 0d6ddc34..e8cf8a8b 100644 --- a/ModelToINetworkConverter.cpp +++ b/ModelToINetworkConverter.cpp @@ -110,7 +110,7 @@ void ModelToINetworkConverter<HalPolicy>::Convert() Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what()); m_ConversionResult = ConversionResult::UnsupportedFeature; } - + bool UnsupportedDynamicOperation = false; for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationIdx++) { const auto& operation = getMainModel(m_Model).operations[operationIdx]; @@ -147,9 +147,33 @@ void ModelToINetworkConverter<HalPolicy>::Convert() // We still need to continue and check the other ones. 
if (!ok) { + if (m_Data.m_DynamicInputsEncountered) + { + Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx); + UnsupportedDynamicOperation = true; + } + m_ConversionResult = ConversionResult::UnsupportedFeature; } + m_Data.m_DynamicInputsEncountered = false; } + + // Due to the NNAPI partitioner not supporting partition boundaries of unknown size, + // any operations whose outputs connect to an unsupported operation with dynamic inputs + // will cause a failure. + + // The simplest solution to this problem is to not support any operations in a model containing + // an unsupported operation with dynamic inputs. + if (UnsupportedDynamicOperation) + { + Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported", + __func__); + for (auto& operation : m_OperationSupported) + { + operation.second = false; + } + } + try { if (m_ConversionResult == ConversionResult::Success) |