diff options
author | Francis Murtagh <francis.murtagh@arm.com> | 2018-10-04 16:03:07 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:58 +0100 |
commit | d59116ecb54c5bfe828d82ea0bc3367bc9b8c5dd (patch) | |
tree | 782d777544416eca8dcf8b924b710a395a75ad36 /src/backends/cl/workloads | |
parent | de9011bc446d767932b6fec356f65791dff685e5 (diff) | |
download | armnn-d59116ecb54c5bfe828d82ea0bc3367bc9b8c5dd.tar.gz |
IVGCVSW-1889 - Unit test Convolution2d with NHWC
* Added a simple convolution unit test
* Set the data layout correctly in the workloads
Change-Id: Ie71b8415f6abc392a84900fc4438b7416fbb558a
Diffstat (limited to 'src/backends/cl/workloads')
-rw-r--r-- | src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp | 8 | ||||
-rw-r--r-- | src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp | 8 |
2 files changed, 12 insertions, 4 deletions
diff --git a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp index 813808345e..0d70ddbd6c 100644 --- a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp +++ b/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp @@ -25,7 +25,7 @@ ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQu const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo(); m_KernelTensor = std::make_unique<arm_compute::CLTensor>(); - BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout); + BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout); arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, @@ -38,7 +38,7 @@ ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQu if (m_Data.m_Parameters.m_BiasEnabled) { m_BiasTensor = std::make_unique<arm_compute::CLTensor>(); - BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout); + BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout); } m_Data.ValidateInputsOutputs("ClConvolution2dFloat32Workload", 1, 1); @@ -46,6 +46,10 @@ ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQu arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); + input.info()->set_data_layout(aclDataLayout); + output.info()->set_data_layout(aclDataLayout); + m_ConvolutionLayer.configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), diff --git a/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp b/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp index d9b9dfd833..4f8da34e31 
100644 --- a/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp +++ b/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp @@ -24,7 +24,7 @@ ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQu const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo(); m_KernelTensor = std::make_unique<arm_compute::CLTensor>(); - BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout); + BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout); arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, @@ -37,7 +37,7 @@ ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQu if (m_Data.m_Parameters.m_BiasEnabled) { m_BiasTensor = std::make_unique<arm_compute::CLTensor>(); - BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout); + BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout); } m_Data.ValidateInputsOutputs("ClConvolution2dUint8Workload", 1, 1); @@ -45,6 +45,10 @@ ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQu arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); + input.info()->set_data_layout(aclDataLayout); + output.info()->set_data_layout(aclDataLayout); + m_ConvolutionLayer.configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), |