From 3c673949b4ed3ab3129859b18439ed8fe87a6ad1 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Thu, 25 Jul 2019 09:26:06 +0100
Subject: IVGCVSW-3521 CpuAcc V1.2 pad Failures

* Fixed Pad and PadV2 failures and skips.
* Templated ConvertPad to enable float16 tests to run.

Signed-off-by: Mike Kelly
Change-Id: I50ded84fe44ea5d5949e877f383f32adff88680d
---
 ConversionUtils.hpp | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 5ebec6b3..fa686a6f 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1651,4 +1651,74 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
                                                 armnn::Optional<armnn::TensorInfo>(outputInfo));
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalOperand   = typename HalPolicy::Operand,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
+{
+    ALOGV("hal_1_1::HalPolicy::ConvertPad()");
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    armnn::PadDescriptor descriptor;
+    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
+    {
+        return Fail("%s: Could not convert paddings", __func__);
+    }
+
+    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
+    // pad value must be "logical zero"; we set it equal to the QuantizationOffset so that it effectively ends up
+    // as (QuantizationOffset - QuantizationOffset) * scale = 0.
+    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+    {
+        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+    }
+
+    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        ALOGD("Output shape not set, will infer from inputs");
+        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPadSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
+                                                   0,
+                                                   *layer,
+                                                   model,
+                                                   data,
+                                                   armnn::Optional<armnn::TensorInfo>(outputInfo));
+}
+
 } // namespace armnn_driver
--
cgit v1.2.1
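
Note on the templating mentioned in the commit message: ConvertPad is parameterised
on a HalPolicy type whose nested typedefs select the HAL operand/operation/model
types at compile time, so the single body above can be instantiated for HAL 1.1 and
for the float16-capable HAL 1.2 alike. Below is a minimal, self-contained sketch of
that policy-template pattern; PolicyV1_1, PolicyV1_2 and ConvertPadLike are invented
for illustration and are not the driver's actual API.

#include <iostream>
#include <string>

// Two stand-in "HAL policy" types; in the driver each HalPolicy bundles the
// operand/operation/model types for one NN API version. Invented names.
struct PolicyV1_1
{
    struct Operation { std::string name = "PAD (HAL 1.1)"; };
    struct Model {};
};

struct PolicyV1_2
{
    struct Operation { std::string name = "PAD_V2 (HAL 1.2)"; };
    struct Model {};
};

// One shared conversion template, mirroring the shape of ConvertPad above:
// the policy parameter selects the HAL types at compile time, so one body
// serves every HAL version.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertPadLike(HalOperation& operation, const HalModel& /*model*/)
{
    std::cout << "converting " << operation.name << '\n';
    return true;
}

int main()
{
    PolicyV1_1::Operation op11; PolicyV1_1::Model m11;
    PolicyV1_2::Operation op12; PolicyV1_2::Model m12;
    ConvertPadLike<PolicyV1_1>(op11, m11); // converting PAD (HAL 1.1)
    ConvertPadLike<PolicyV1_2>(op12, m12); // converting PAD_V2 (HAL 1.2)
    return 0;
}

Unlike virtual dispatch, each instantiation is resolved statically, so supporting
an additional HAL version adds no runtime cost to the conversion path.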
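
The quantisation comment in the new code is worth unpacking: for an
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM tensor, real = (quantised - offset) * scale,
so a pad value equal to the quantisation offset dequantises to exactly zero,
whereas a raw 0 generally does not. A small standalone sketch of that arithmetic
(plain C++, no ArmNN dependency; the scale/offset values are illustrative):

#include <cstdint>
#include <iostream>

// Asymmetric 8-bit dequantisation: real = (q - offset) * scale.
float Dequantize(uint8_t q, float scale, int32_t offset)
{
    return static_cast<float>(static_cast<int32_t>(q) - offset) * scale;
}

int main()
{
    const float   scale  = 0.05f;
    const int32_t offset = 128;

    // Padding with the quantisation offset gives a logical zero:
    // (128 - 128) * 0.05 = 0, which is what m_PadValue now encodes.
    std::cout << Dequantize(static_cast<uint8_t>(offset), scale, offset) << '\n'; // 0

    // Padding with raw 0 would dequantise to (0 - 128) * 0.05 = -6.4,
    // i.e. a large negative border rather than zero padding.
    std::cout << Dequantize(0, scale, offset) << '\n'; // -6.4
    return 0;
}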