author     Mike Kelly <mike.kelly@arm.com>    2019-07-25 09:26:06 +0100
committer  Mike Kelly <mike.kelly@arm.com>    2019-07-25 09:26:06 +0100
commit     3c673949b4ed3ab3129859b18439ed8fe87a6ad1 (patch)
tree       0c5a47c6dbf9d03e7cdfdf10c72b8d3696bea59b /ConversionUtils.hpp
parent     29404fb3b16b301d630f492a2b89b9eb39b67e63 (diff)
IVGCVSW-3521 CpuAcc V1.2 pad Failures
* Fixed Pad and PadV2 failures and skips.
* Templated ConvertPad to enable float16 tests to run.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I50ded84fe44ea5d5949e877f383f32adff88680d
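The templating the second bullet refers to follows the driver's HalPolicy pattern: each HAL version bundles its operand/model/operation types into a policy struct, and a single converter body is instantiated per version, so the same ConvertPad can serve HAL 1.2, which adds float16 operand types. A minimal self-contained sketch of that pattern (PolicyV1_1, PolicyV1_2 and DescribeOperand are invented here for illustration; they are not the driver's types):

#include <cstdio>

// Each HAL version bundles its own types into a "policy" struct; converters
// are templated on the policy instead of being duplicated per HAL version.
struct PolicyV1_1
{
    struct Operand { const char* type = "TENSOR_FLOAT32"; };
};

struct PolicyV1_2
{
    struct Operand { const char* type = "TENSOR_FLOAT16"; }; // HAL 1.2 adds float16
};

// One implementation serves every HAL version exposing the same interface.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
void DescribeOperand(const HalOperand& operand)
{
    std::printf("operand type: %s\n", operand.type);
}

int main()
{
    DescribeOperand<PolicyV1_1>(PolicyV1_1::Operand{}); // pre-1.2 path
    DescribeOperand<PolicyV1_2>(PolicyV1_2::Operand{}); // float16-capable path
    return 0;
}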
Diffstat (limited to 'ConversionUtils.hpp')
-rw-r--r--  ConversionUtils.hpp  70
1 file changed, 70 insertions, 0 deletions
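The dynamic-output branch in the diff below infers the padded output shape from the input shape and the pad list. The rule InferPadOutputShape applies is simply outDim[i] = inDim[i] + padBefore[i] + padAfter[i]; a small standalone sketch of that rule (InferPadShape is an invented stand-in, not the driver's function):

#include <cstdio>
#include <utility>
#include <vector>

// Stand-in for InferPadOutputShape: each output dimension is the input
// dimension plus the padding added before and after it.
std::vector<unsigned int> InferPadShape(
    const std::vector<unsigned int>& inputShape,
    const std::vector<std::pair<unsigned int, unsigned int>>& padList)
{
    std::vector<unsigned int> outputShape;
    for (size_t i = 0; i < inputShape.size(); ++i)
    {
        outputShape.push_back(inputShape[i] + padList[i].first + padList[i].second);
    }
    return outputShape;
}

int main()
{
    // Pad a 2x3 tensor by 1 on every edge -> 4x5.
    const auto out = InferPadShape({2, 3}, {{1, 1}, {1, 1}});
    std::printf("%u x %u\n", out[0], out[1]); // prints "4 x 5"
    return 0;
}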
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 5ebec6b3..fa686a6f 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1651,4 +1651,74 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
 armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
+template<typename HalPolicy,
+ typename HalOperation = typename HalPolicy::Operation,
+ typename HalOperand = typename HalPolicy::Operand,
+ typename HalModel = typename HalPolicy::Model>
+bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+ ALOGV("hal_1_1::HalPolicy::ConvertPad()");
+
+ LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+ if (!input.IsValid())
+ {
+ return Fail("%s: Operation has invalid inputs", __func__);
+ }
+
+ const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+ unsigned int rank = inputInfo.GetNumDimensions();
+
+ armnn::PadDescriptor descriptor;
+ if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
+ {
+ return Fail("%s: Could not convert paddings", __func__);
+ }
+
+ // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
+ // pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends up as
+ // (QuantizationOffset - QuantizationOffset) * scale = 0.
+ if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+ {
+ descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+ }
+
+ const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+ if (!output)
+ {
+ return Fail("%s: Could not read output", __func__);
+ }
+
+ armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+ if (IsDynamicTensor(outputInfo))
+ {
+ ALOGD("Output shape not set, will infer from inputs");
+ outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+ }
+
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ IsPadSupported,
+ data.m_Backends,
+ isSupported,
+ inputInfo,
+ outputInfo,
+ descriptor);
+ if (!isSupported)
+ {
+ return false;
+ }
+
+ armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+ assert(layer != nullptr);
+ input.Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+ 0,
+ *layer,
+ model,
+ data,
+ armnn::Optional<armnn::TensorInfo>(outputInfo));
+}
+
 } // namespace armnn_driver
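To spell out the QuantisedAsymm8 comment in the diff above: with asymmetric quantization, real = (q - offset) * scale, so padding with the offset itself makes every padded element dequantize to exactly zero. A worked example with made-up quantization parameters (not values from the driver):

#include <cassert>
#include <cstdint>

int main()
{
    // Hypothetical QUANT8_ASYMM parameters: real = (q - offset) * scale.
    const float   scale  = 0.5f;
    const int32_t offset = 128;

    // Pad with the offset itself, as ConvertPad does for QuantisedAsymm8 inputs...
    const int32_t padValue = offset;

    // ...so padded elements dequantize to (128 - 128) * 0.5 = 0.0f, "logical zero".
    const float dequantised = (padValue - offset) * scale;
    assert(dequantised == 0.0f);
    return 0;
}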