about · summary · refs · log · tree · commit · diff
diff options
context:
space:
mode:
author: Mohamed Nour Abouelseoud <mohamednour.abouelseoud@arm.com> 2018-11-27 17:35:35 +0000
committer: jimfly01 <jim.flynn@arm.com> 2018-11-28 12:43:33 +0000
commit: 9337af3f8eeb5993a52b300cad940f0ac15d6c79 (patch)
tree: 13a93c2b2abea0dfac38a94a54a9ce8fec7c2001
parent: cd45d0b9099ae504f4798500cc712ccf957bc2a1 (diff)
download: armnn-branches/armnn_18_11.tar.gz
IVGCVSW-2205 Fixed a bug in DepthWiseConv workload where NCHW layout was assumed (branch: branches/armnn_18_11)
Includes a temporary workaround for COMPMID-1813. Change-Id: I3e5217281be072d5b61788ab8a75e818bbc5d247 (cherry picked from commit 7e7261ed9a4f8d48c86e57044312e3df82189713 on master)
-rw-r--r-- src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 30
1 file changed, 26 insertions, 4 deletions
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 1b4887e0e0..d294705eaa 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -8,6 +8,7 @@
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonLayerSupport.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/DataLayoutIndexed.hpp>
namespace armnn
{
@@ -60,11 +61,26 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
+ INeonTensorHandle* inputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0]);
+ INeonTensorHandle* outputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0]);
+ DataLayoutIndexed dataLayoutIndex(m_Data.m_Parameters.m_DataLayout);
+
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
+ else
+ {
+ // Workaround for COMPMID-1813
+ m_BiasTensor = std::make_unique<arm_compute::Tensor>();
+ TensorInfo biasTensorInfo({weightInfo.GetShape()[dataLayoutIndex.GetChannelsIndex()]},
+ weightInfo.GetDataType() == DataType::QuantisedAsymm8 ? DataType::Signed32 :
+ weightInfo.GetDataType(),
+ weightInfo.GetQuantizationScale() *
+ info.m_InputTensorInfos[0].GetQuantizationScale());
+ BuildArmComputeTensor(*m_BiasTensor, biasTensorInfo, m_Data.m_Parameters.m_DataLayout);
+ }
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
m_Data.m_Parameters.m_StrideY,
@@ -76,14 +92,16 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);
- arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = inputTensorHandle->GetTensor();
+ arm_compute::ITensor& output = outputTensorHandle->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
- bool use3x3Optimisation = weightInfo.GetShape()[3] == 3 && weightInfo.GetShape()[2] == 3;
+ bool use3x3Optimisation = weightInfo.GetShape()[dataLayoutIndex.GetWidthIndex()] == 3 &&
+ weightInfo.GetShape()[dataLayoutIndex.GetHeightIndex()] == 3;
+
if (use3x3Optimisation)
{
m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>();
@@ -109,10 +127,14 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
- if (m_BiasTensor)
+ if (m_Data.m_Parameters.m_BiasEnabled)
{
InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
+ else
+ {
+ InitialiseArmComputeTensorEmpty(*m_BiasTensor);
+ }
m_pDepthwiseConvolutionLayer->prepare();
FreeUnusedTensors();