author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-07-10 13:01:41 +0100
committer  Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-07-10 13:01:41 +0100
commit     366e0a66f4566cf71dff3f850556350709ee66a8 (patch)
tree       f85badd07900acca485b6b66c8fd4b9b59f777fd
parent     f03fcf0dd180ba2c87648a524fcca9214e1f979b (diff)
download   android-nn-driver-366e0a66f4566cf71dff3f850556350709ee66a8.tar.gz
IVGCVSW-3482 Report operations with dynamic output size as unsupported
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ifafe2a6fbfd6019b3395d51ed9967db794d2b034
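
This change makes the 1.0, 1.1 and 1.2 HalPolicy converters reject operations
whose output operand has no shape set at model-build time (NNAPI permits
leaving output dimensions unspecified until execution). The check is factored
into a new IsDynamicOutput() helper in OutputShapeUtils; ConvertPrelu is the
one converter that instead infers the missing shape from its inputs. The
recurring guard, as it appears in the hunks below:

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicOutput(outputInfo))
    {
        return Fail("%s: Dynamic output not supported", __func__);
    }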
-rw-r--r--   1.0/HalPolicy.cpp      22
-rw-r--r--   1.1/HalPolicy.cpp      16
-rw-r--r--   1.2/HalPolicy.cpp      27
-rw-r--r--   Android.mk              2
-rw-r--r--   OutputShapeUtils.cpp    5
-rw-r--r--   OutputShapeUtils.hpp    2
6 files changed, 59 insertions, 15 deletions
diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 9673a74c..2149d40f 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -8,6 +8,7 @@
 #include <armnn/Optional.hpp>
 
 #include "FullyConnected.hpp"
+#include "OutputShapeUtils.hpp"
 
 namespace armnn_driver
 {
@@ -388,11 +389,17 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
         return Fail("%s: Operation has invalid outputs", __func__);
     }
 
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
+
     if (!IsLayerSupportedForAnyBackend(__func__,
                                        armnn::IsDequantizeSupported,
                                        data.m_Backends,
                                        input.GetTensorInfo(),
-                                       GetTensorInfoForOperand(*outputOperand)))
+                                       outputInfo))
     {
         return false;
     }
@@ -957,6 +964,11 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
+
     armnn::L2NormalizationDescriptor desc;
     desc.m_DataLayout = armnn::DataLayout::NHWC;
 
@@ -1082,7 +1094,11 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
         return Fail("%s: Operation has no outputs", __func__);
     }
 
-    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    const armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
 
     armnn::SoftmaxDescriptor desc;
     if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
@@ -1094,7 +1110,7 @@
                                        armnn::IsSoftmaxSupported,
                                        data.m_Backends,
                                        input.GetTensorInfo(),
-                                       outInfo,
+                                       outputInfo,
                                        desc))
     {
         return false;
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 78f157dd..dbd380ab 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -5,6 +5,8 @@
 
 #include "HalPolicy.hpp"
 
+#include "OutputShapeUtils.hpp"
+
 #include "../1.0/HalPolicy.hpp"
 
 namespace
@@ -176,20 +178,24 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
         return false;
     }
 
-    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
 
     if (!IsLayerSupportedForAnyBackend(__func__,
                                        armnn::IsSubtractionSupported,
                                        data.m_Backends,
                                        input0.GetTensorInfo(),
                                        input1.GetTensorInfo(),
-                                       outInfo))
+                                       outputInfo))
     {
         return false;
     }
 
     armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
+    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
 
     const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
     const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
@@ -292,6 +298,10 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
     }
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
 
     if (!IsLayerSupportedForAnyBackend(__func__,
                                        armnn::IsPadSupported,
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index b194a57a..58fcf73c 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -173,6 +173,11 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
 
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
+
     // ArmNN does not currently support non-fixed weights or bias
     const ConstTensorPin weightsPin =
         ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
@@ -442,6 +447,18 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
         return Fail("%s: Could not read input 0", __func__);
     }
 
+    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicOutput(outputInfo))
+    {
+        return Fail("%s: Dynamic output not supported", __func__);
+    }
+
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     unsigned int rank = inputInfo.GetNumDimensions();
 
@@ -496,14 +513,6 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
         return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
     }
 
-    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
-    if (!output)
-    {
-        return Fail("%s: Could not read output", __func__);
-    }
-
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
-
     if (!IsLayerSupportedForAnyBackend(__func__,
                                        armnn::IsPadSupported,
                                        data.m_Backends,
@@ -543,7 +552,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
     const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (outputInfo.GetNumElements() == 0u)
+    if (IsDynamicOutput(outputInfo))
    {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
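
Note that ConvertPrelu (directly above) remains the one converter that
tolerates a dynamic output: instead of failing, it logs the situation and
recovers a concrete shape from the input and alpha tensors via
InferPreluOutputShape before carrying on.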
diff --git a/Android.mk b/Android.mk
index 215b0a84..9bbee43d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -114,6 +114,7 @@ LOCAL_SRC_FILES := \
         ArmnnDevice.cpp \
         ArmnnPreparedModel.cpp \
         ModelToINetworkConverter.cpp \
+        OutputShapeUtils.cpp \
         RequestThread.cpp \
         Utils.cpp \
         ConversionUtils.cpp
@@ -227,6 +228,7 @@ LOCAL_SRC_FILES := \
         ArmnnDevice.cpp \
         ArmnnPreparedModel.cpp \
         ModelToINetworkConverter.cpp \
+        OutputShapeUtils.cpp \
         RequestThread.cpp \
         Utils.cpp \
         ConversionUtils.cpp
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index de27630e..a0c624c8 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -12,6 +12,11 @@ namespace armnn_driver
 
 using namespace armnn;
 
+bool IsDynamicOutput(const TensorInfo& outputInfo)
+{
+    return outputInfo.GetNumElements() == 0u;
+}
+
 TensorShape InferPreluOutputShape(const TensorShape& inputShape, const TensorShape& alphaShape)
 {
     // NOTE: The inferred PReLU output size will be the maximum size along each dimension
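
The body of InferPreluOutputShape is not part of this diff, but the NOTE above
describes what it does: take the maximum extent along each dimension of the
two shapes. A minimal sketch of that logic, assuming equal ranks (the file's
actual implementation, unchanged by this commit, may handle rank mismatch
differently):

    #include <algorithm>
    #include <vector>

    #include <armnn/Tensor.hpp>

    // Sketch only: per-dimension maximum of two equal-rank shapes,
    // e.g. [1, 4] vs [3, 1] -> [3, 4].
    armnn::TensorShape InferPreluOutputShapeSketch(const armnn::TensorShape& inputShape,
                                                   const armnn::TensorShape& alphaShape)
    {
        const unsigned int numDims = inputShape.GetNumDimensions();
        std::vector<unsigned int> dims(numDims);
        for (unsigned int i = 0u; i < numDims; ++i)
        {
            dims[i] = std::max(inputShape[i], alphaShape[i]);
        }
        return armnn::TensorShape(numDims, dims.data());
    }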
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index f314252f..6e2a76db 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -10,6 +10,8 @@
 namespace armnn_driver
 {
 
+bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
+
 armnn::TensorShape InferPreluOutputShape(const armnn::TensorShape& inputShape, const armnn::TensorShape& alphaShape);
 
 } // namespace armnn_driver
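
Callers combine the two helpers the way ConvertPrelu does above; condensed,
the pattern is (converter boilerplate elided, names as in the diff):

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        // Most converters fail here:
        //     return Fail("%s: Dynamic output not supported", __func__);
        // PReLU instead infers the shape from its inputs:
        outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
    }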