aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2019-07-12 13:55:57 +0100
committerSadik Armagan <sadik.armagan@arm.com>2019-07-12 13:55:57 +0100
commit5e9521ce5eac717b25844f9da4849b1fc2082f76 (patch)
tree05f5d7bc6656033784996c7ee08501789c9a5616
parent6bda94a88536e8abb487a6ab29da4b8cfc8034e1 (diff)
downloadandroid-nn-driver-5e9521ce5eac717b25844f9da4849b1fc2082f76.tar.gz
IVGCVSW-3458 Fix VTS sub dynamic_output_shape test failures
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ia6e2a082ed4a0255d8c9c10375590553e6ede9f0
-rw-r--r--1.1/HalPolicy.cpp12
-rw-r--r--OutputShapeUtils.cpp64
-rw-r--r--OutputShapeUtils.hpp2
3 files changed, 52 insertions, 26 deletions
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index 9f2c9be1..fbd2e08e 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -178,10 +178,11 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
return false;
}
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+ armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
if (IsDynamicOutput(outputInfo))
{
- return Fail("%s: Dynamic output not supported", __func__);
+ ALOGD("Output shape not set, will infer from inputs");
+ outputInfo.SetShape(InferSubOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
}
if (!IsLayerSupportedForAnyBackend(__func__,
@@ -203,7 +204,12 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
if (endLayer)
{
BroadcastTensor(input0, input1, startLayer, *data.m_Network);
- return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation,
+ 0,
+ *endLayer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outputInfo));
}
return Fail("%s: ProcessActivation failed", __func__);
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index a1a49d70..739038ac 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -8,6 +8,42 @@
#include <algorithm>
#include <vector>
+namespace
+{
+
+using namespace armnn;
+
+TensorShape CalculateMaxShape(const TensorShape& inShape0, const TensorShape& inShape1)
+{
+ // NOTE: The inferred output size will be the maximum size along each dimension
+ // of inShape0 and inShape1, starting with the trailing dimensions, and working its way forward.
+ //
+ // Example: inShape0={4, 1, 2}, inShape1={5, 4, 3, 1} => outputShape={5, 4, 3, 2}
+
+ const unsigned int numInput0Dims = inShape0.GetNumDimensions();
+ const unsigned int numInput1Dims = inShape1.GetNumDimensions();
+
+ const unsigned int maxNumDims = std::max(numInput0Dims, numInput1Dims);
+
+ TensorShape outputShape = TensorShape(maxNumDims);
+ for (unsigned int reverseIdx = 1u; reverseIdx <= maxNumDims; ++reverseIdx)
+ {
+ const int input0Idx = numInput0Dims - reverseIdx;
+ const int input1Idx = numInput1Dims - reverseIdx;
+
+ const unsigned int input0DimSize = input0Idx >= 0 ? inShape0[input0Idx] : 0u;
+ const unsigned int input1DimSize = input1Idx >= 0 ? inShape1[input1Idx] : 0u;
+
+ const unsigned int outputIdx = maxNumDims - reverseIdx;
+ outputShape[outputIdx] = std::max(input0DimSize, input1DimSize);
+ }
+
+ return outputShape;
+}
+
+} // anonymous namespace
+
+
namespace armnn_driver
{
@@ -38,30 +74,12 @@ TensorShape InferPadOutputShape(const TensorShape& inputShape,
TensorShape InferPreluOutputShape(const TensorShape& inputShape, const TensorShape& alphaShape)
{
- // NOTE: The inferred PReLU output size will be the maximum size along each dimension
- // of input and alpha, starting with the trailing dimensions, and working its way forward.
- //
- // Example: inputShape={4, 1, 2}, alphaShape={5, 4, 3, 1} => outputShape={5, 4, 3, 2}
-
- const unsigned int numInputDims = inputShape.GetNumDimensions();
- const unsigned int numAlphaDims = alphaShape.GetNumDimensions();
-
- const unsigned int maxNumDims = std::max(numInputDims, numAlphaDims);
-
- TensorShape outputShape = TensorShape(maxNumDims);
- for (unsigned int reverseIdx = 1u; reverseIdx <= maxNumDims; ++reverseIdx)
- {
- const int inputIdx = numInputDims - reverseIdx;
- const int alphaIdx = numAlphaDims - reverseIdx;
-
- const unsigned int inputDimSize = inputIdx >= 0 ? inputShape[inputIdx] : 0u;
- const unsigned int alphaDimSize = alphaIdx >= 0 ? alphaShape[alphaIdx] : 0u;
-
- const unsigned int outputIdx = maxNumDims - reverseIdx;
- outputShape[outputIdx] = std::max(inputDimSize, alphaDimSize);
- }
+ return CalculateMaxShape(inputShape, alphaShape);
+}
- return outputShape;
+TensorShape InferSubOutputShape(const TensorShape& input0Shape, const TensorShape& input1Shape)
+{
+ return CalculateMaxShape(input0Shape, input1Shape);
}
} // namespace armnn_driver \ No newline at end of file
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index d1818db1..be255c28 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -17,6 +17,8 @@ armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
armnn::TensorShape InferPreluOutputShape(const armnn::TensorShape& inputShape, const armnn::TensorShape& alphaShape);
+armnn::TensorShape InferSubOutputShape(const armnn::TensorShape& input0Shape, const armnn::TensorShape& input1Shape);
+
} // namespace armnn_driver