author     Nikhil Raj <nikhil.raj@arm.com>             2018-10-12 13:51:57 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>   2018-10-22 16:57:53 +0100
commit     cec6b655d9f6ddb73c721ca47a7d67eeaad5c043 (patch)
tree       898dbc90785f77f9c724bb8de61d7fb1b1ce3c0b /src/backends/neon/workloads
parent     f3eb46d23c6001150d36d80acac7ad1247174630 (diff)
download   armnn-cec6b655d9f6ddb73c721ca47a7d67eeaad5c043.tar.gz
IVGCVSW-1922 Unit test for DepthwiseConvolution with NHWC
Change-Id: I3e6e5b9a62f30d03c05bd7178adea8f4c8275da8
Diffstat (limited to 'src/backends/neon/workloads')
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp  8
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp  8
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
index 742a768b94..4b266f3288 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
@@ -20,12 +20,12 @@ NeonDepthwiseConvolutionFloatWorkload::NeonDepthwiseConvolutionFloatWorkload(
const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
@@ -41,6 +41,10 @@ NeonDepthwiseConvolutionFloatWorkload::NeonDepthwiseConvolutionFloatWorkload(
arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
bool use3x3Optimisation = weightInfo.GetShape()[3] == 3 && weightInfo.GetShape()[2] == 3;
if (use3x3Optimisation)
{
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
index 722b778eba..6c6c2dfb6c 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
@@ -20,12 +20,12 @@ NeonDepthwiseConvolutionUint8Workload::NeonDepthwiseConvolutionUint8Workload(
const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
@@ -41,6 +41,10 @@ NeonDepthwiseConvolutionUint8Workload::NeonDepthwiseConvolutionUint8Workload(
arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
bool use3x3Optimisation = weightInfo.GetShape()[3] == 3 && weightInfo.GetShape()[2] == 3;
if (use3x3Optimisation)
{
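
Note on the change above: the new set_data_layout() calls rely on a helper, ConvertDataLayout, to translate the ArmNN layout enum into its Arm Compute Library equivalent before the workload is configured. The following is a minimal sketch of how such a helper is expected to behave; the function name ConvertDataLayoutSketch and the exact headers are illustrative assumptions, not the repository's actual implementation.

    // Illustrative sketch only: maps the ArmNN data layout enum onto the
    // Arm Compute Library one so input/output tensor infos can be tagged
    // as NHWC or NCHW, as done in the workloads patched above.
    #include <arm_compute/core/Types.h>
    #include <armnn/Types.hpp>
    #include <stdexcept>

    arm_compute::DataLayout ConvertDataLayoutSketch(armnn::DataLayout dataLayout)
    {
        switch (dataLayout)
        {
            case armnn::DataLayout::NHWC: return arm_compute::DataLayout::NHWC;
            case armnn::DataLayout::NCHW: return arm_compute::DataLayout::NCHW;
            default: throw std::invalid_argument("Unsupported data layout");
        }
    }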