From 915f2a7327098636cc5a292510cd00b715e67e90 Mon Sep 17 00:00:00 2001
From: Cathal Corbett
Date: Fri, 15 Apr 2022 14:12:08 +0100
Subject: IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d

!armnn:7417

Signed-off-by: Cathal Corbett
Change-Id: Ic37d6b8677e040c60a90358dd0a4a8eb33fb6ea0
---
 ConversionUtils_1_2.hpp | 53 ++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 27 deletions(-)

diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index 0ff50cff..b1f2d05a 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -523,12 +523,11 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
     // ArmNN does not currently support non-fixed weights or bias
     // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
     const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
-
-    if (weightsOperand == nullptr)
+    if (!weightsOperand)
     {
-        return Fail("%s: Operand is invalid", __func__);
+        return Fail("%s: Could not read weights", __func__);
     }
-    if ( weightsOperand->dimensions[0] != 1)
+    if (weightsOperand->dimensions[0] != 1)
     {
         return Fail("%s: Invalid weights; for depthwise convolution, dimension 0 must be 1 but it is %i",
                     __func__, weightsOperand->dimensions[0] );
@@ -550,30 +549,27 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
     unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
     unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
 
-    // The layout for weights in depthwise is [ 1, H, W, O] and it's the same in ArmNN. No need to permute anything.
-    const ConstTensorPin weightsPin =
-        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
-                                                         1,
-                                                         model,
-                                                         data);
-
-    // Bias is a 1D tensor
-    const ConstTensorPin biasPin =
-        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
+    LayerInputHandle weightsInput = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+    if (!weightsInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
 
-    if (!weightsPin.IsValid())
+    const HalOperand* biasOperand = GetInputOperand<HalPolicy>(operation, 2, model);
+    if (!biasOperand)
     {
-        return Fail("%s: Operation has invalid weights", __func__);
+        return Fail("%s: Could not read bias", __func__);
     }
 
-    if (!biasPin.IsValid())
+    LayerInputHandle biasInput = ConvertToLayerInputHandle<HalPolicy>(operation, 2, model, data); // 1D
+    if (!biasInput.IsValid())
     {
-        return Fail("%s: Operation has invalid biases", __func__);
+        return Fail("%s: Operation has invalid inputs", __func__);
     }
 
-    ConstTensor weights = weightsPin.GetConstTensor();
-    ConstTensor bias = biasPin.GetConstTensor();
-    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
+    biasInput.SanitizeQuantizationScale(weightsInput, input);
+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
+    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
 
     ActivationFn activation;
 
@@ -589,8 +585,8 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
             return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
         }
 
-        const uint32_t kernelX = weights.GetShape()[2];
-        const uint32_t kernelY = weights.GetShape()[1];
+        const uint32_t kernelX = weightsInfo.GetShape()[2];
+        const uint32_t kernelY = weightsInfo.GetShape()[1];
         const uint32_t inputX = inputInfo.GetShape()[widthIndex];
         const uint32_t inputY = inputInfo.GetShape()[heightIndex];
 
@@ -618,7 +614,7 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
     }
 
     desc.m_BiasEnabled = true;
-    Optional<TensorInfo> biases(bias.GetInfo());
+    Optional<TensorInfo> biases(biasInfo);
 
     bool isSupported = false;
     auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
@@ -630,7 +626,7 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
                                    inputInfo,
                                    outputInfo,
                                    desc,
-                                   weights.GetInfo(),
+                                   weightsInfo,
                                    biases);
     };
 
@@ -648,8 +644,7 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
         return false;
     }
 
-    IConnectableLayer* startLayer =
-        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
 
     if (!startLayer)
     {
@@ -658,6 +653,10 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m
     }
 
     input.Connect(startLayer->GetInputSlot(0));
 
+    // Connect weights and bias inputs
+    weightsInput.Connect(startLayer->GetInputSlot(1));
+    biasInput.Connect(startLayer->GetInputSlot(2));
+
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
 }
-- 
cgit v1.2.1
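
Note (illustration, not part of the patch): after this change the driver no longer passes the weights and bias ConstTensors to AddDepthwiseConvolution2dLayer(); it uses the descriptor-only overload and wires the constant weights and bias into input slots 1 and 2 of the layer. The sketch below shows the same ConstTensorsAsInput pattern for a standalone network built directly against armnn::INetwork. The tensor shapes, data values and layer names are made up for the example, and it assumes an ArmNN release where the descriptor-only AddDepthwiseConvolution2dLayer overload and TensorInfo::SetConstant() are available (the 22.05-era API this patch targets).

// Minimal sketch of the ConstTensorsAsInput pattern for DepthwiseConvolution2d.
// Shapes, values and layer names are placeholders chosen for the example.
#include <armnn/ArmNN.hpp>
#include <vector>

armnn::INetworkPtr BuildDepthwiseConvNetwork()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout  = armnn::DataLayout::NHWC;
    desc.m_StrideX     = 1;
    desc.m_StrideY     = 1;
    desc.m_BiasEnabled = true;

    // Weights use the AndroidNN depthwise layout [ 1, H, W, I * M ]; bias is 1D.
    std::vector<float> weightsData(1 * 3 * 3 * 8, 0.5f);
    std::vector<float> biasData(8, 0.0f);

    armnn::TensorInfo weightsInfo({1, 3, 3, 8}, armnn::DataType::Float32);
    armnn::TensorInfo biasInfo({8}, armnn::DataType::Float32);
    weightsInfo.SetConstant();
    biasInfo.SetConstant();

    armnn::ConstTensor weights(weightsInfo, weightsData);
    armnn::ConstTensor bias(biasInfo, biasData);

    armnn::IConnectableLayer* inputLayer   = network->AddInputLayer(0);
    armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights, "weights");
    armnn::IConnectableLayer* biasLayer    = network->AddConstantLayer(bias, "bias");
    armnn::IConnectableLayer* convLayer    = network->AddDepthwiseConvolution2dLayer(desc, "depthwise");
    armnn::IConnectableLayer* outputLayer  = network->AddOutputLayer(0);

    inputLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1, 5, 5, 8}, armnn::DataType::Float32));
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
    convLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1, 3, 3, 8}, armnn::DataType::Float32));

    // Slot 0 is the activation; slots 1 and 2 take the constant weights and bias,
    // mirroring the Connect() calls this patch adds in ConvertDepthwiseConv2d_1_2.
    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
    weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    return network;
}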