aboutsummaryrefslogtreecommitdiff
path: root/1.2
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-07-10 13:01:41 +0100
committerAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-07-10 13:01:41 +0100
commit366e0a66f4566cf71dff3f850556350709ee66a8 (patch)
treef85badd07900acca485b6b66c8fd4b9b59f777fd /1.2
parentf03fcf0dd180ba2c87648a524fcca9214e1f979b (diff)
downloadandroid-nn-driver-366e0a66f4566cf71dff3f850556350709ee66a8.tar.gz
IVGCVSW-3482 Report operations with dynamic output size as unsupported
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ifafe2a6fbfd6019b3395d51ed9967db794d2b034
Diffstat (limited to '1.2')
-rw-r--r--1.2/HalPolicy.cpp27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index b194a57a..58fcf73c 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -173,6 +173,11 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }
+
// ArmNN does not currently support non-fixed weights or bias
const ConstTensorPin weightsPin =
ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
@@ -442,6 +447,18 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
return Fail("%s: Could not read input 0", __func__);
}
+ const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output", __func__);
+ }
+
+ const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicOutput(outputInfo))
+ {
+ return Fail("%s: Dynamic output not supported", __func__);
+ }
+
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
unsigned int rank = inputInfo.GetNumDimensions();
@@ -496,14 +513,6 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
}
- const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
- if (!output)
- {
- return Fail("%s: Could not read output", __func__);
- }
-
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
if (!IsLayerSupportedForAnyBackend(__func__,
armnn::IsPadSupported,
data.m_Backends,
@@ -543,7 +552,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
- if (outputInfo.GetNumElements() == 0u)
+ if (IsDynamicOutput(outputInfo))
{
ALOGD("Output shape not set, will infer from inputs");
outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));