From cb8ac845da5a6297f7009e26df282a2bfd73f3b9 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Fri, 5 Jul 2019 15:47:07 +0100
Subject: IVGCVSW-3301 Support PAD_V2 in HAL1.2 driver

Signed-off-by: Aron Virginas-Tar
Change-Id: If3da7b9015fb816bb27b85ab491102005859b3b0
---
 1.1/HalPolicy.cpp   | 33 +++----------------
 1.2/HalPolicy.cpp   | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1.2/HalPolicy.hpp   |  2 ++
 ConversionUtils.hpp | 46 ++++++++++++++++++++++++++-
 4 files changed, 142 insertions(+), 30 deletions(-)

diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index fc4c7a2c..0f00910f 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -270,48 +270,24 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv
 bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
 {
     LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
-
     if (!input.IsValid())
     {
         return Fail("%s: Operation has invalid inputs", __func__);
     }
 
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-
-    const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
-
-    if (!paddingsOperand)
-    {
-        return Fail("%s: Could not read paddings operand", __func__);
-    }
-
     unsigned int rank = inputInfo.GetNumDimensions();
-    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
-    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
-    {
-        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
-    }
-
-    std::vector<int32_t> paddings;
-    GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
 
-    // add padding for each dimension of input tensor.
     armnn::PadDescriptor descriptor;
-    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+    if (!ConvertPaddings<hal_1_1::HalPolicy>(operation, model, data, rank, descriptor))
     {
-        int paddingBeforeInput = paddings[i];
-        int paddingAfterInput = paddings[i + 1];
-        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
-        {
-            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
-        }
-        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+        return Fail("%s: Could not convert paddings", __func__);
     }
 
     const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
     if (!output)
     {
-        return Fail("%s: Could not read output 0", __func__);
+        return Fail("%s: Could not read output", __func__);
     }
 
     const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
@@ -724,6 +700,5 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m
     return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
 }
 
-
 } // namespace hal_1_1
-} // namespace armnn_driver
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index e058e026..836977da 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -9,6 +9,7 @@
 #include "../1.1/HalPolicy.hpp"
 
 #include
+#include
 
 #include
 
@@ -139,6 +140,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertConv2d(operation, model, data);
         case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
+        case V1_2::OperationType::PAD_V2:
+            return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
             return ConvertPrelu(operation, model, data);
         case V1_2::OperationType::RESIZE_BILINEAR:
@@ -429,6 +432,94 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model&
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
 }
 
+bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    armnn::PadDescriptor descriptor;
+    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
+    {
+        return Fail("%s: Could not convert paddings", __func__);
+    }
+
+    // Determine type of padding value
+    OperandType operandType0;
+    OperandType operandType2;
+
+    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
+        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // Read value to use for padding
+    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
+    {
+        armnn::Half f16PadValue;
+        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
+        {
+            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
+        }
+
+        descriptor.m_PadValue = f16PadValue;
+    }
+    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
+    {
+        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
+        {
+            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
+        }
+    }
+    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
+    {
+        int32_t quantizedPadValue = 0;
+        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
+        {
+            return Fail("%s: Could not read input 2 (INT32)", __func__);
+        }
+
+        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
+                                                  inputInfo.GetQuantizationScale(),
+                                                  inputInfo.GetQuantizationOffset());
+    }
+    else
+    {
+        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (!IsLayerSupportedForAnyBackend(__func__,
+                                       armnn::IsPadSupported,
+                                       data.m_Backends,
+                                       inputInfo,
+                                       outputInfo,
+                                       descriptor))
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+}
+
 bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
 {
     LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index a7bef247..9d6dd497 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -35,6 +35,8 @@ private:
 
     static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertResize(const Operation& operation,
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 9a711cb7..d30b8a4e 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -5,11 +5,12 @@
 
 #pragma once
 
+#include "Utils.hpp"
+
 #include
 
 #include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
 #include "armnn/src/armnnUtils/Permute.hpp"
-#include "Utils.hpp"
 
 #include
 #include
@@ -1136,6 +1137,49 @@ bool ConvertToActivation(const HalOperation& operation,
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
 
+template<typename HalPolicy,
+         typename HalOperation = typename HalPolicy::Operation,
+         typename HalModel     = typename HalPolicy::Model>
+bool ConvertPaddings(const HalOperation& operation,
+                     const HalModel& model,
+                     ConversionData& data,
+                     unsigned int rank,
+                     armnn::PadDescriptor& padDescriptor)
+{
+    using HalOperand = typename HalPolicy::Operand;
+
+    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!paddingsOperand)
+    {
+        return Fail("%s: Could not read paddings operand", __func__);
+    }
+
+    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
+    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
+    {
+        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
+    }
+
+    std::vector<int32_t> paddings;
+    GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
+
+    // add padding for each dimension of input tensor.
+    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+    {
+        int paddingBeforeInput = paddings[i];
+        int paddingAfterInput = paddings[i + 1];
+
+        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
+        {
+            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
+        }
+
+        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+    }
+
+    return true;
+}
+
 template
--
cgit v1.2.1