author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>  2019-07-22 15:30:22 +0100
committer mike.kelly <mike.kelly@arm.com>                2019-07-22 15:38:48 +0000
commit    6f3785d4f612e06854ab63dffbd2cd3d484c2e14 (patch)
tree      71a082042e0c8fd95774ac515812152d1bbade22
parent    aab709b3598b07223ba2da1ab8a18f95e30ff70d (diff)
download  armnn-6f3785d4f612e06854ab63dffbd2cd3d484c2e14.tar.gz
IVGCVSW-3548 Use BuildArmComputePadStrideInfo() in CL and NEON convolution workloads
* Refactoring: use the existing utility function for creating
  arm_compute::PadStrideInfo objects in the CL and NEON convolution
  workloads instead of duplicating code

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Id5e5a0f264e20af99dabce8dd8c6b782dedb94e6
-rw-r--r--  src/backends/cl/workloads/ClConvolution2dWorkload.cpp            | 10
-rw-r--r--  src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp     |  9
-rw-r--r--  src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp   |  9
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp        |  8
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 11
5 files changed, 7 insertions(+), 40 deletions(-)
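
For reference, a minimal sketch of what the shared BuildArmComputePadStrideInfo() helper is assumed to look like. The actual definition lives in the backends' common Arm Compute utility header; the template signature below is an assumption, but the body mirrors the duplicated construction removed in the hunks that follow (FLOOR rounding, same padding/stride fields):

    // Assumed sketch: builds an arm_compute::PadStrideInfo from an ArmNN
    // convolution descriptor, replacing the per-workload duplication below.
    template <typename Descriptor>
    arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor& descriptor)
    {
        return arm_compute::PadStrideInfo(descriptor.m_StrideX,
                                          descriptor.m_StrideY,
                                          descriptor.m_PadLeft,
                                          descriptor.m_PadRight,
                                          descriptor.m_PadTop,
                                          descriptor.m_PadBottom,
                                          arm_compute::DimensionRoundingType::FLOOR);
    }

Each workload then obtains its pad/stride configuration with a single call, e.g. BuildArmComputePadStrideInfo(m_Data.m_Parameters), as shown in the diffs below.
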
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 3b6aa6cd7c..e8af0ee3b7 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -66,14 +66,6 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
m_Data.m_Parameters.m_DilationY);
@@ -92,6 +84,8 @@ ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescrip
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
+ arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
+
m_ConvolutionLayer.configure(&input,
m_KernelTensor.get(),
m_BiasTensor.get(),
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index e681e95d90..126950c348 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -91,14 +91,6 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
- const arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
m_Data.m_Parameters.m_DilationX,
m_Data.m_Parameters.m_DilationY);
@@ -120,6 +112,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
// Get the depth multiplier
const unsigned int depthMultiplier = weightInfo.GetShape()[0];
+ arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
// Check for optimisation opportunities.
bool use3x3Optimisation = (weightInfo.GetShape()[2] == 3) && (weightInfo.GetShape()[3] == 3);
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index cedfebd33a..7c0736645b 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -65,14 +65,6 @@ ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(
m_WeightsTensor = std::make_unique<arm_compute::CLTensor>();
BuildArmComputeTensor(*m_WeightsTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasesTensor = std::make_unique<arm_compute::CLTensor>();
@@ -89,6 +81,7 @@ ClTransposeConvolution2dWorkload::ClTransposeConvolution2dWorkload(
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
+ arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
m_Layer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, padStrideInfo);
InitializeArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight);
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 564ba0310f..683decd45c 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -81,13 +81,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
+ arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
m_Data.m_Parameters.m_DilationY);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index 7b8e36827a..bfe4ddaea7 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -93,15 +93,6 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
- arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
- m_Data.m_Parameters.m_StrideY,
- m_Data.m_Parameters.m_PadLeft,
- m_Data.m_Parameters.m_PadRight,
- m_Data.m_Parameters.m_PadTop,
- m_Data.m_Parameters.m_PadBottom,
- arm_compute::DimensionRoundingType::FLOOR);
-
-
const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);
@@ -120,6 +111,8 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
// Get the depth multiplier
const unsigned int depthMultiplier = weightInfo.GetShape()[0];
+ arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
+
// Check for optimisation opportunities.
bool use3x3Optimisation = (weightInfo.GetShape()[2] == 3) && (weightInfo.GetShape()[3] == 3);
if (use3x3Optimisation)