aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2018-10-04 16:03:07 +0100
committerMatthew Bentham <matthew.bentham@arm.com>2018-10-10 16:16:58 +0100
commitd59116ecb54c5bfe828d82ea0bc3367bc9b8c5dd (patch)
tree782d777544416eca8dcf8b924b710a395a75ad36
parentde9011bc446d767932b6fec356f65791dff685e5 (diff)
downloadarmnn-d59116ecb54c5bfe828d82ea0bc3367bc9b8c5dd.tar.gz
IVGCVSW-1889 - Unit test Convolution2d with NHWC
* Added simple convolution Unit test * Set the data layout correctly in workloads Change-Id: Ie71b8415f6abc392a84900fc4438b7416fbb558a
-rw-r--r--src/armnn/layers/Convolution2dLayer.cpp2
-rw-r--r--src/backends/WorkloadData.hpp2
-rw-r--r--src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp8
-rw-r--r--src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp8
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp8
-rw-r--r--src/backends/test/ArmComputeCl.cpp2
-rw-r--r--src/backends/test/ArmComputeNeon.cpp1
-rw-r--r--src/backends/test/Conv2dTestImpl.hpp91
-rw-r--r--src/backends/test/LayerTests.cpp54
-rw-r--r--src/backends/test/LayerTests.hpp7
10 files changed, 171 insertions, 12 deletions
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index 07d6d7eee4..d4b67cca3f 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -27,8 +27,6 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const Graph& graph
descriptor.m_Weight = m_Weight.get();
- descriptor.m_DataLayout = GetParameters().m_DataLayout;
-
if (m_Param.m_BiasEnabled)
{
BOOST_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
diff --git a/src/backends/WorkloadData.hpp b/src/backends/WorkloadData.hpp
index aac2228695..c7777b0eed 100644
--- a/src/backends/WorkloadData.hpp
+++ b/src/backends/WorkloadData.hpp
@@ -145,13 +145,11 @@ struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2
Convolution2dQueueDescriptor()
: m_Weight(nullptr)
, m_Bias(nullptr)
- , m_DataLayout(DataLayout::NCHW)
{
}
const ConstCpuTensorHandle* m_Weight;
const ConstCpuTensorHandle* m_Bias;
- DataLayout m_DataLayout;
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp
index 813808345e..0d70ddbd6c 100644
--- a/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dFloatWorkload.cpp
@@ -25,7 +25,7 @@ ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQu
const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
m_Data.m_Parameters.m_StrideY,
@@ -38,7 +38,7 @@ ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQu
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
m_Data.ValidateInputsOutputs("ClConvolution2dFloat32Workload", 1, 1);
@@ -46,6 +46,10 @@ ClConvolution2dFloatWorkload::ClConvolution2dFloatWorkload(const Convolution2dQu
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
m_ConvolutionLayer.configure(&input,
m_KernelTensor.get(),
m_BiasTensor.get(),
diff --git a/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp b/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp
index d9b9dfd833..4f8da34e31 100644
--- a/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dUint8Workload.cpp
@@ -24,7 +24,7 @@ ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQu
const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();
m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_KernelTensor, weightInfo, descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
m_Data.m_Parameters.m_StrideY,
@@ -37,7 +37,7 @@ ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQu
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
m_Data.ValidateInputsOutputs("ClConvolution2dUint8Workload", 1, 1);
@@ -45,6 +45,10 @@ ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQu
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
m_ConvolutionLayer.configure(&input,
m_KernelTensor.get(),
m_BiasTensor.get(),
diff --git a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
index 3b9626d7d3..02edabfd9c 100644
--- a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
@@ -62,13 +62,17 @@ NeonConvolution2dBaseWorkload<dataTypes...>::NeonConvolution2dBaseWorkload(
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
m_KernelTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
if (m_Data.m_Parameters.m_BiasEnabled)
{
m_BiasTensor = std::make_unique<arm_compute::Tensor>();
- BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), descriptor.m_DataLayout);
+ BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
}
arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
diff --git a/src/backends/test/ArmComputeCl.cpp b/src/backends/test/ArmComputeCl.cpp
index b4ec9bab16..d432a26f34 100644
--- a/src/backends/test/ArmComputeCl.cpp
+++ b/src/backends/test/ArmComputeCl.cpp
@@ -58,6 +58,8 @@ ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false)
ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false)
ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest)
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+
// Depthwise Convolution
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true)
ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false)
diff --git a/src/backends/test/ArmComputeNeon.cpp b/src/backends/test/ArmComputeNeon.cpp
index a5733d8a18..7a60c31f73 100644
--- a/src/backends/test/ArmComputeNeon.cpp
+++ b/src/backends/test/ArmComputeNeon.cpp
@@ -33,6 +33,7 @@ ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false)
ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false)
ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest)
+ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
namespace
{
diff --git a/src/backends/test/Conv2dTestImpl.hpp b/src/backends/test/Conv2dTestImpl.hpp
index c593c7ba26..8e29615c47 100644
--- a/src/backends/test/Conv2dTestImpl.hpp
+++ b/src/backends/test/Conv2dTestImpl.hpp
@@ -194,6 +194,97 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(armnn::IWorkloadFactory& workl
}
template<typename T, typename B>
+LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const boost::multi_array<T, 4>& input,
+ const boost::multi_array<T, 4>& kernel,
+ const boost::multi_array<B, 1>& bias,
+ const boost::multi_array<T, 4>& outputExpected,
+ armnn::DataLayout dataLayout,
+ float qScale,
+ int32_t qOffset,
+ uint32_t padLeft = 1,
+ uint32_t padTop = 1,
+ uint32_t padRight = 1,
+ uint32_t padBottom = 1,
+ uint32_t strideX = 1,
+ uint32_t strideY = 1)
+{
+ unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
+ unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
+ unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
+ unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]);
+
+ unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
+ unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
+ unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
+ unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+
+ unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+ unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+ unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+ unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+
+ bool biasEnabled = bias.size() > 0;
+
+ // Creates the tensors.
+ armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
+ armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+
+ // Construct the input data.
+ std::vector<T> inputData;
+ inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
+ auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+ // Construct the output data, with bias applied, as appropriate.
+ std::vector<T> outputData;
+ outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
+
+ LayerTestResult<T, 4> ret(outputTensorInfo);
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+
+ armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+ armnn::Convolution2dQueueDescriptor data;
+
+ data.m_Weight = &weightsTensor;
+ data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
+ data.m_Parameters.m_StrideX = strideX;
+ data.m_Parameters.m_StrideY = strideY;
+ data.m_Parameters.m_PadLeft = padLeft;
+ data.m_Parameters.m_PadRight = padRight;
+ data.m_Parameters.m_PadTop = padTop;
+ data.m_Parameters.m_PadBottom = padBottom;
+ data.m_Parameters.m_BiasEnabled = biasEnabled;
+ data.m_Parameters.m_DataLayout = dataLayout;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+
+ workloadFactory.Finalize();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+ return ret;
+}
+
+template<typename T, typename B>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(armnn::IWorkloadFactory& workloadFactory,
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& kernel,
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 78d4d62089..066d0c28f4 100644
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -236,6 +236,54 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory&
qOffset);
}
+template<typename T>
+LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
+ float qScale,
+ int32_t qOffset,
+ bool biasEnabled,
+ armnn::DataLayout dataLayout)
+{
+    // Use a common single-batch 3x4 single-channel image, in NHWC layout.
+
+ armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
+ {
+ 1, 5, 2, 3,
+ 8, 7, 3, 6,
+ 3, 3, 9, 1
+ });
+
+
+    // Use a single 3x3 single-channel kernel.
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
+ 4, 5, 6,
+ 0, 0, 0,
+ 3, 2, 1
+ });
+
+    // Expected output is 1 batch of a 3x4 single-channel image.
+ armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+
+ const std::vector<float> outputData =
+ {
+ 23, 41, 33, 21,
+ 44, 65, 76, 52,
+ 82, 85, 79, 42
+ };
+
+ boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
+
+ return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset);
+}
+
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
bool biasEnabled)
{
@@ -254,6 +302,12 @@ LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& wo
return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled);
}
+LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
+ bool biasEnabled)
+{
+ return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
+}
+
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
bool biasEnabled)
{
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index e4ebaff527..12bcdd8bbc 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -50,10 +50,13 @@ struct LayerTestResult
};
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
- bool biasEnabled);
+ bool biasEnabled);
LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
- bool biasEnabled);
+ bool biasEnabled);
+
+LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
+ bool biasEnabled);
LayerTestResult<float, 4>
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory);