author    Sadik Armagan <sadik.armagan@arm.com>  2019-07-11 10:53:38 +0100
committer Sadik Armagan <sadik.armagan@arm.com>  2019-07-11 10:53:38 +0100
commit    310d8ff46f26b6c656b417ac6dc59ef610aaa513 (patch)
tree      5d7f85ce31e7cd0c4fcb5427addf32775de58733
parent    366e0a66f4566cf71dff3f850556350709ee66a8 (diff)
IVGCVSW-3457 Fix VTS pad dynamic_output_shape test failures
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Icb9a308f19ac6adbcf0d1de4d8733b95bab3bf0a
-rw-r--r--  1.1/HalPolicy.cpp     12
-rw-r--r--  1.2/HalPolicy.cpp     20
-rw-r--r--  OutputShapeUtils.cpp  18
-rw-r--r--  OutputShapeUtils.hpp   3
4 files changed, 43 insertions(+), 10 deletions(-)
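
Taken together, the change replaces the hard Fail() on dynamic pad outputs with inference: each output dimension becomes the input dimension plus the before- and after-padding for that dimension. A minimal standalone sketch of that arithmetic (illustration only, not driver code):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

int main()
{
    // Input shape {2, 3}; pad dim 0 by 1 on each side, dim 1 by 2 on each side.
    const std::vector<unsigned int> inputShape = {2, 3};
    const std::vector<std::pair<unsigned int, unsigned int>> padList = {{1, 1}, {2, 2}};

    std::vector<unsigned int> outputShape(inputShape.size());
    for (std::size_t dim = 0; dim < inputShape.size(); ++dim)
    {
        outputShape[dim] = inputShape[dim] + padList[dim].first + padList[dim].second;
    }

    assert(outputShape[0] == 4u); // 2 + 1 + 1
    assert(outputShape[1] == 7u); // 3 + 2 + 2
    return 0;
}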
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index dbd380ab..9f2c9be1 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -297,10 +297,11 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
return Fail("%s: Could not read output", __func__);
}
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+ armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicOutput(outputInfo))
{
- return Fail("%s: Dynamic output not supported", __func__);
+ ALOGD("Output shape not set, will infer from inputs");
+ outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
}
if (!IsLayerSupportedForAnyBackend(__func__,
@@ -318,7 +319,12 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
input.Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation,
+ 0,
+ *layer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outputInfo));
}
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
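
Both converters now pass the possibly-inferred outputInfo through the new trailing armnn::Optional<armnn::TensorInfo> parameter of SetupAndTrackLayerOutputSlot. That helper's body is outside this diff; the sketch below only illustrates the general override pattern it enables (ResolveOutputInfo is a hypothetical name, not driver code):

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper: prefer an explicitly supplied TensorInfo (for example,
// one whose shape was just inferred) over the operand info recorded in the
// model, which may still be dynamic (zero elements).
armnn::TensorInfo ResolveOutputInfo(const armnn::TensorInfo& modelOutputInfo,
                                    const armnn::Optional<armnn::TensorInfo>& overrideOutputInfo)
{
    return overrideOutputInfo.has_value() ? overrideOutputInfo.value() : modelOutputInfo;
}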
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 58fcf73c..cdf8c0f4 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -453,12 +453,6 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
return Fail("%s: Could not read output", __func__);
}
- const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
- if (IsDynamicOutput(outputInfo))
- {
- return Fail("%s: Dynamic output not supported", __func__);
- }
-
const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
unsigned int rank = inputInfo.GetNumDimensions();
@@ -468,6 +462,13 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
return Fail("%s: Could not convert paddings", __func__);
}
+ armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicOutput(outputInfo))
+ {
+ ALOGD("Output shape not set, will infer from inputs");
+ outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+ }
+
// Determine type of padding value
OperandType operandType0;
OperandType operandType2;
@@ -528,7 +529,12 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con
input.Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
- return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+ return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
+ 0,
+ *layer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outputInfo));
}
bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
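
Note that ConvertPadV2 does not just relax the check, it relocates it: the hunk at old line 453 deletes the block and the hunk at old line 468 reinstates it after the paddings operand has been parsed, because InferPadOutputShape consumes descriptor.m_PadList. Condensed, the required ordering is as follows (paraphrased from the diff; the exact ConvertPaddings signature is an assumption):

// Paraphrase of the ConvertPadV2 flow after this patch:
armnn::PadDescriptor descriptor;

// 1. Parse the paddings operand first, so that descriptor.m_PadList is populated.
if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
{
    return Fail("%s: Could not convert paddings", __func__);
}

// 2. Only then can a dynamic output shape be inferred from the pad list.
armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
if (IsDynamicOutput(outputInfo))
{
    ALOGD("Output shape not set, will infer from inputs");
    outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
}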
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index a0c624c8..a1a49d70 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -6,6 +6,7 @@
#include "OutputShapeUtils.hpp"
#include <algorithm>
+#include <vector>
namespace armnn_driver
{
@@ -17,6 +18,23 @@ bool IsDynamicOutput(const TensorInfo& outputInfo)
return outputInfo.GetNumElements() == 0u;
}
+TensorShape InferPadOutputShape(const TensorShape& inputShape,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList)
+{
+ const unsigned int numDims = inputShape.GetNumDimensions();
+
+ TensorShape outputShape(numDims);
+ for (unsigned int dim = 0; dim < numDims; ++dim)
+ {
+ unsigned int dimSize = inputShape[dim];
+ const std::pair<unsigned int, unsigned int>& dimPadding = padList[dim];
+ dimSize += dimPadding.first;
+ dimSize += dimPadding.second;
+ outputShape[dim] = dimSize;
+ }
+ return outputShape;
+}
+
TensorShape InferPreluOutputShape(const TensorShape& inputShape, const TensorShape& alphaShape)
{
// NOTE: The inferred PReLU output size will be the maximum size along each dimension
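
With the helper in place it can be exercised directly. A minimal standalone usage sketch, assuming the armnn headers are on the include path and that armnn::TensorShape provides its initializer-list constructor:

#include "OutputShapeUtils.hpp"
#include <cassert>
#include <utility>
#include <vector>

int main()
{
    // NHWC input of shape {1, 2, 2, 1}, padded by one element on each side
    // of the two spatial dimensions.
    const armnn::TensorShape inputShape({1, 2, 2, 1});
    const std::vector<std::pair<unsigned int, unsigned int>> padList =
        {{0, 0}, {1, 1}, {1, 1}, {0, 0}};

    const armnn::TensorShape outputShape =
        armnn_driver::InferPadOutputShape(inputShape, padList);

    assert(outputShape[1] == 4u); // 2 + 1 + 1
    assert(outputShape[2] == 4u); // 2 + 1 + 1
    return 0;
}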
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index 6e2a76db..d1818db1 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -12,6 +12,9 @@ namespace armnn_driver
bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
+armnn::TensorShape InferPadOutputShape(const armnn::TensorShape& inputShape,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList);
+
armnn::TensorShape InferPreluOutputShape(const armnn::TensorShape& inputShape, const armnn::TensorShape& alphaShape);
} // namespace armnn_driver
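
For reference, the gate on all of these paths is the existing IsDynamicOutput, which treats a TensorInfo with zero elements as an output whose shape is not yet known. A small illustrative check (shapes chosen arbitrarily):

#include "OutputShapeUtils.hpp"
#include <armnn/Tensor.hpp>
#include <cassert>

int main()
{
    // An operand with an unset dimension is represented with size 0, so the
    // element count is 0 and the output is considered dynamic.
    const armnn::TensorInfo dynamicInfo(armnn::TensorShape({1, 0, 0, 1}),
                                        armnn::DataType::Float32);
    assert(armnn_driver::IsDynamicOutput(dynamicInfo));

    const armnn::TensorInfo staticInfo(armnn::TensorShape({1, 4, 4, 1}),
                                       armnn::DataType::Float32);
    assert(!armnn_driver::IsDynamicOutput(staticInfo));
    return 0;
}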