aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFinn Williams <Finn.Williams@Arm.com>2020-08-19 22:54:00 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2020-08-20 15:44:05 +0000
commit291a16b28522c75e1f44cb37f6b61585320da215 (patch)
tree496d97f713b3897e2b2b325be7a7ff92ec9f7bb1
parent81f27fdb53511c8f377ca2d82fc8436ebf675e76 (diff)
downloadandroid-nn-driver-291a16b28522c75e1f44cb37f6b61585320da215.tar.gz
IVGCVSW-5220 Set all operations to unsupported after encountering an unsupported operation with dynamic inputs
Change-Id: Ia30698d608810dabb419d7d326ad985789d50603 Signed-off-by: Finn Williams <Finn.Williams@Arm.com>
-rw-r--r--ConversionUtils.hpp6
-rw-r--r--ModelToINetworkConverter.cpp26
2 files changed, 30 insertions, 2 deletions
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 27d07200..830502df 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -44,12 +44,14 @@ struct ConversionData
ConversionData(const std::vector<armnn::BackendId>& backends)
: m_Backends(backends)
, m_Network(nullptr, nullptr)
+ , m_DynamicInputsEncountered(false)
{}
const std::vector<armnn::BackendId> m_Backends;
armnn::INetworkPtr m_Network;
std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
std::vector<android::nn::RunTimePoolInfo> m_MemPools;
+ bool m_DynamicInputsEncountered;
};
class LayerInputHandle
@@ -1290,11 +1292,13 @@ LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetw
if (IsDynamicTensor(operandTensorInfo))
{
+ data.m_DynamicInputsEncountered = true;
+
const uint32_t operandIndex = operation.inputs[inputIndex];
// Check if the dynamic input tensors have been inferred by one of the previous layers
// If not we can't support them
- if(data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
+ if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
{
operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
}
diff --git a/ModelToINetworkConverter.cpp b/ModelToINetworkConverter.cpp
index 0d6ddc34..e8cf8a8b 100644
--- a/ModelToINetworkConverter.cpp
+++ b/ModelToINetworkConverter.cpp
@@ -110,7 +110,7 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
m_ConversionResult = ConversionResult::UnsupportedFeature;
}
-
+ bool UnsupportedDynamicOperation = false;
for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationIdx++)
{
const auto& operation = getMainModel(m_Model).operations[operationIdx];
@@ -147,9 +147,33 @@ void ModelToINetworkConverter<HalPolicy>::Convert()
// We still need to continue and check the other ones.
if (!ok)
{
+ if (m_Data.m_DynamicInputsEncountered)
+ {
+ Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
+ UnsupportedDynamicOperation = true;
+ }
+
m_ConversionResult = ConversionResult::UnsupportedFeature;
}
+ m_Data.m_DynamicInputsEncountered = false;
}
+
+    // Due to the NNAPI partitioner not supporting partition boundaries of unknown size,
+    // any operation whose outputs connect to an unsupported operation with dynamic inputs
+    // will cause a failure.
+
+    // The simplest solution to this problem is to not support any operations in a model containing
+    // an unsupported operation with dynamic inputs.
+ if (UnsupportedDynamicOperation)
+ {
+ Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
+ __func__);
+ for (auto& operation : m_OperationSupported)
+ {
+ operation.second = false;
+ }
+ }
+
try
{
if (m_ConversionResult == ConversionResult::Success)