diff options
author | Nina Drozd <nina.drozd@arm.com> | 2018-10-01 14:20:25 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-12 11:59:53 +0100 |
commit | 62a4a9f50a91d4c1b33205a61da7bf97f9afd32b (patch) | |
tree | 48e83e5461925de246af940082d674f7d11e6d00 /1.1/HalPolicy.cpp | |
parent | 378333dae8693b1c8e3aef014dbd8373cdccdcea (diff) | |
download | android-nn-driver-62a4a9f50a91d4c1b33205a61da7bf97f9afd32b.tar.gz |
IVGCVSW-1886 - adding converter method for Pad
!armnn:151190
Change-Id: I37eb70543f23b0ef2f6027998413c62784d9b181
Diffstat (limited to '1.1/HalPolicy.cpp')
-rw-r--r-- | 1.1/HalPolicy.cpp | 69 |
1 file changed, 69 insertions, 0 deletions
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp index de743088..a94f3058 100644 --- a/1.1/HalPolicy.cpp +++ b/1.1/HalPolicy.cpp @@ -31,6 +31,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertSub(operation, model, data); case V1_1::OperationType::MEAN: return ConvertMean(operation, model, data); + case V1_1::OperationType::PAD: + return ConvertPad(operation, model, data); default: return Fail("%s: Operation type %s not supported in ArmnnDriver", __func__, toString(operation.type).c_str()); @@ -203,5 +205,72 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); } +bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data) +{ + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + + const Operand* paddingsOperand = GetInputOperand(operation, 1, model); + + if (!paddingsOperand) + { + return Fail("%s: Could not read paddings operand", __func__); + } + + unsigned int rank = inputInfo.GetNumDimensions(); + armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand); + if (paddingsOperandShape.GetNumDimensions() != rank || paddingsOperandShape.GetNumElements() != 2) + { + return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank); + } + + std::vector<int32_t> paddings; + GetTensorInt32Values(*paddingsOperand, paddings, model, data); + + // add padding for each dimension of input tensor. 
+ armnn::PadDescriptor descriptor; + for (unsigned int i = 0; i < paddings.size() - 1; i += 2) + { + int paddingBeforeInput = paddings[i]; + int paddingAfterInput = paddings[i + 1]; + if (paddingBeforeInput < 0 || paddingAfterInput < 0) + { + return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__); + } + descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput); + } + + const Operand* output = GetOutputOperand(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + + if (!IsLayerSupported(__func__, + armnn::IsPadSupported, + data.m_Compute, + inputInfo, + outputInfo, + descriptor)) + { + return false; + } + + armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + layer->GetOutputSlot(0).SetTensorInfo(outputInfo); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + } // namespace hal_1_1 } // namespace armnn_driver
\ No newline at end of file |