From c79fa1b558253095c6e039eaf122dff0b4de334d Mon Sep 17 00:00:00 2001
From: Mohamed Nour Abouelseoud
Date: Wed, 2 Jan 2019 14:32:24 +0000
Subject: IVGCVSW-2205 Reversed workaround for COMPMID-1813

Change-Id: Icf7ef88a2eaef80ec32cc718b0ca9d26e830ed07
---
 .../neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 02c17851cd..6cad12cba8 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -73,17 +73,6 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
         m_BiasTensor = std::make_unique<arm_compute::Tensor>();
         BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
     }
-    else
-    {
-        // Workaround for COMPMID-1813
-        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
-        TensorInfo biasTensorInfo({weightInfo.GetShape()[dataLayoutIndex.GetChannelsIndex()]},
-                                  weightInfo.GetDataType() == DataType::QuantisedAsymm8 ? DataType::Signed32 :
-                                  weightInfo.GetDataType(),
-                                  weightInfo.GetQuantizationScale() *
-                                  info.m_InputTensorInfos[0].GetQuantizationScale());
-        BuildArmComputeTensor(*m_BiasTensor, biasTensorInfo, m_Data.m_Parameters.m_DataLayout);
-    }
 
     arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                              m_Data.m_Parameters.m_StrideY,
@@ -134,10 +123,6 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
     {
         InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
     }
-    else
-    {
-        InitialiseArmComputeTensorEmpty(*m_BiasTensor);
-    }
 
     m_pDepthwiseConvolutionLayer->prepare();
     FreeUnusedTensors();
--
cgit v1.2.1
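
For context, the reverted else-branch followed the usual convention for quantized convolution biases: when the weights are QuantisedAsymm8, the dummy bias is Signed32 and its quantization scale is the product of the input and weight scales. The sketch below is not part of the patch; the helper name MakeDummyBiasInfo is hypothetical, and it only assumes Arm NN's public TensorInfo API to illustrate how such a bias TensorInfo could be derived:

// Sketch only (not the workload code): derive a dummy per-channel bias
// TensorInfo the way the removed workaround did. MakeDummyBiasInfo is a
// hypothetical helper name.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

armnn::TensorInfo MakeDummyBiasInfo(const armnn::TensorInfo& weightInfo,
                                    const armnn::TensorInfo& inputInfo,
                                    unsigned int channelsIndex)
{
    // One bias value per output channel.
    armnn::TensorShape biasShape({ weightInfo.GetShape()[channelsIndex] });

    // Quantized (QuantisedAsymm8) weights take a Signed32 bias; otherwise the
    // bias keeps the weight data type.
    armnn::DataType biasType =
        weightInfo.GetDataType() == armnn::DataType::QuantisedAsymm8
            ? armnn::DataType::Signed32
            : weightInfo.GetDataType();

    // Bias scale is the product of the weight and input quantization scales.
    float biasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();

    return armnn::TensorInfo(biasShape, biasType, biasScale);
}

In the workload itself this information was built inline from weightInfo and info.m_InputTensorInfos[0] and passed to BuildArmComputeTensor; after this revert, no bias tensor is created or initialised when the layer has no bias.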