author     Nikhil Raj <nikhil.raj@arm.com>          2018-09-25 16:16:13 +0100
committer  Matthew Bentham <matthew.bentham@arm.com> 2018-10-10 16:16:57 +0100
commit     a05c2106ba5dd4361363049b0588d6995fd01f45 (patch)
tree       39b03ef4bcdd448df5a7ff76f56d7f7a8b0a42c9 /src/backends/NeonWorkloads
parent     711fa31d5d43b904d28bcd407cd7e921529a37ca (diff)
download   armnn-a05c2106ba5dd4361363049b0588d6995fd01f45.tar.gz
IVGCVSW-1910 Add data layout parameter for DepthwiseConvolution
Change-Id: Ia14c9d8c0a38b669a7589e63d74424e398790e54
Diffstat (limited to 'src/backends/NeonWorkloads')
-rw-r--r--  src/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp  | 8
-rw-r--r--  src/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp | 4
-rw-r--r--  src/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp | 4
3 files changed, 8 insertions(+), 8 deletions(-)
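The change threads descriptor.m_DataLayout from the DepthwiseConvolution2dDescriptor through to the Arm Compute tensor builders, so the NEON backend can describe NHWC as well as NCHW tensors. Below is a minimal, hypothetical sketch of what a layout-aware BuildArmComputeTensorInfo overload could look like; the helper name ToAclDataLayout and the exact structure are assumptions for illustration, not the actual ArmComputeTensorUtils implementation.

// Sketch only: illustrates the idea behind the new dataLayout parameter.
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

namespace armcomputetensorutils
{

// Existing layout-agnostic helper; declared here only so the sketch stands alone.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo);

// Map the ArmNN layout enum onto the Compute Library one (assumed helper name).
inline arm_compute::DataLayout ToAclDataLayout(armnn::DataLayout dataLayout)
{
    switch (dataLayout)
    {
        case armnn::DataLayout::NHWC: return arm_compute::DataLayout::NHWC;
        case armnn::DataLayout::NCHW: return arm_compute::DataLayout::NCHW;
        default:                      return arm_compute::DataLayout::UNKNOWN;
    }
}

// Layout-aware overload: build the TensorInfo as before, then tag it with the
// requested data layout so the NEON kernels select the matching code path.
inline arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                         armnn::DataLayout dataLayout)
{
    arm_compute::TensorInfo aclTensorInfo = BuildArmComputeTensorInfo(tensorInfo);
    aclTensorInfo.set_data_layout(ToAclDataLayout(dataLayout));
    return aclTensorInfo;
}

} // namespace armcomputetensorutils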
diff --git a/src/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp b/src/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp
index ec6c97700b..ef60b3238d 100644
--- a/src/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp
+++ b/src/backends/NeonWorkloads/NeonDepthwiseConvolutionBaseWorkload.cpp
@@ -17,11 +17,11 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
const boost::optional<TensorInfo>& biases)
{
const arm_compute::TensorInfo aclInputInfo =
- armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclOutputInfo =
- armcomputetensorutils::BuildArmComputeTensorInfo(output);
+ armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
const arm_compute::TensorInfo aclWeightsInfo =
- armcomputetensorutils::BuildArmComputeTensorInfo(weights);
+ armcomputetensorutils::BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
arm_compute::TensorInfo aclBiasesInfo;
arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
@@ -30,7 +30,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
{
BOOST_ASSERT(biases.is_initialized());
- aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.get());
+ aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
}
diff --git a/src/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp b/src/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp
index b3f7da41b5..db8e27ab23 100644
--- a/src/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp
+++ b/src/backends/NeonWorkloads/NeonDepthwiseConvolutionFloatWorkload.cpp
@@ -20,12 +20,12 @@ NeonDepthwiseConvolutionFloatWorkload::NeonDepthwiseConvolutionFloatWorkload(
const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo);
+ BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
}
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
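This constructor change (mirrored in the Uint8 workload below) builds the kernel and bias tensors with the descriptor's data layout. A corresponding layout-aware BuildArmComputeTensor could, as a sketch, simply initialise the arm_compute::Tensor from the layout-aware TensorInfo builder shown earlier; again this is an assumption about the helper's shape, not the actual ArmComputeTensorUtils implementation.

// Sketch only: hypothetical layout-aware BuildArmComputeTensor overload.
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/runtime/Tensor.h>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

namespace armcomputetensorutils
{

// Layout-aware TensorInfo builder (see the sketch above); declared here so
// this fragment stands alone.
arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout);

// Initialise an arm_compute::Tensor with shape, type and data layout taken
// from the ArmNN TensorInfo plus the requested layout.
inline void BuildArmComputeTensor(arm_compute::Tensor& tensor,
                                  const armnn::TensorInfo& tensorInfo,
                                  armnn::DataLayout dataLayout)
{
    tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
}

} // namespace armcomputetensorutils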
diff --git a/src/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp b/src/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp
index de8a2aeedb..3efc5b0834 100644
--- a/src/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp
+++ b/src/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp
@@ -20,12 +20,12 @@ NeonDepthwiseConvolutionUint8Workload::NeonDepthwiseConvolutionUint8Workload(
const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo);
+ BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
}
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,