-rw-r--r--   src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp   15
1 file changed, 0 insertions(+), 15 deletions(-)
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 02c17851cd..6cad12cba8 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -73,17 +73,6 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
         m_BiasTensor = std::make_unique<arm_compute::Tensor>();
         BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
     }
-    else
-    {
-        // Workaround for COMPMID-1813
-        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
-        TensorInfo biasTensorInfo({weightInfo.GetShape()[dataLayoutIndex.GetChannelsIndex()]},
-                                  weightInfo.GetDataType() == DataType::QuantisedAsymm8 ? DataType::Signed32 :
-                                  weightInfo.GetDataType(),
-                                  weightInfo.GetQuantizationScale() *
-                                  info.m_InputTensorInfos[0].GetQuantizationScale());
-        BuildArmComputeTensor(*m_BiasTensor, biasTensorInfo, m_Data.m_Parameters.m_DataLayout);
-    }
 
     arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                              m_Data.m_Parameters.m_StrideY,
@@ -134,10 +123,6 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
     {
         InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
     }
-    else
-    {
-        InitialiseArmComputeTensorEmpty(*m_BiasTensor);
-    }
 
     m_pDepthwiseConvolutionLayer->prepare();
     FreeUnusedTensors();
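
For reference (not part of the commit): the deleted branch existed because older Compute Library depthwise kernels (tracked as COMPMID-1813) expected a bias tensor even when the workload had none, so ArmNN fabricated a dummy one with Signed32 data for QAsymm8 weights and scale equal to weightScale * inputScale. Below is a minimal standalone sketch of that construction; the helper name and the channelsIndex parameter are assumed for illustration, while the TensorInfo and DataType calls mirror the removed lines.

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    // Sketch of the removed COMPMID-1813 workaround: fabricate a bias
    // TensorInfo when the workload has biases disabled.
    armnn::TensorInfo MakeDummyBiasInfo(const armnn::TensorInfo& weightInfo,
                                        const armnn::TensorInfo& inputInfo,
                                        unsigned int channelsIndex) // channel dim of the weight shape
    {
        // One bias element per output channel of the depthwise weights.
        armnn::TensorShape biasShape({ weightInfo.GetShape()[channelsIndex] });

        // Quantised (QAsymm8) weights take Signed32 biases; float weights
        // keep their own data type.
        armnn::DataType biasType =
            weightInfo.GetDataType() == armnn::DataType::QuantisedAsymm8
                ? armnn::DataType::Signed32
                : weightInfo.GetDataType();

        // Quantised bias scale convention: weightScale * inputScale.
        float biasScale = weightInfo.GetQuantizationScale() *
                          inputInfo.GetQuantizationScale();

        return armnn::TensorInfo(biasShape, biasType, biasScale);
    }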