author    Teresa Charlin <teresa.charlinreyes@arm.com>    2019-06-19 09:34:37 +0100
committer Teresa Charlin <teresa.charlinreyes@arm.com>    2019-06-19 16:03:25 +0100
commit    20b1f88309903b576ae030888022f38cce2bbc82 (patch)
tree      bbbef569dc841bec72bf51b6556c8d6383a1d2b0
parent    a57eccbe313557b9eafec40b39bac3115d9b930d (diff)
download  armnn-20b1f88309903b576ae030888022f38cce2bbc82.tar.gz
IVGCVSW-3271 Add unit test for dilated DepthwiseConvolution2d
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I0e7132f61001f7b2a9fad3d7b21acf2558c01df4
-rw-r--r--  src/backends/backendsCommon/test/Conv2dTestImpl.hpp  |  131
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp      |  401
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp      |   19
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp        |   60
4 files changed, 492 insertions(+), 119 deletions(-)
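
The new tests below rely on the convolution output-size arithmetic quoted in their comments, (I - K_eff + 2P)/S + 1 with an effective kernel K_eff = K + (K - 1)*(D - 1). As a point of reference, a minimal standalone sketch of that computation; the helper name is hypothetical and not part of this patch:

#include <cstdint>
#include <iostream>

// Hypothetical helper (not from the patch): one spatial output dimension of a
// convolution with dilation, matching the formula quoted in the test comments:
// (I - Keff + 2P)/S + 1, where Keff = K + (K - 1)*(D - 1).
uint32_t ConvOutputSize(uint32_t inputSize, uint32_t kernelSize,
                        uint32_t stride, uint32_t padding, uint32_t dilation)
{
    const uint32_t effectiveKernel = kernelSize + (kernelSize - 1) * (dilation - 1);
    return (inputSize - effectiveKernel + 2 * padding) / stride + 1;
}

int main()
{
    // 10x10 input, 3x3 kernel, dilation 3 => effective 7x7 kernel => 4x4 output,
    // as in DepthwiseConvolution2d3x3Dilation3x3Test added by this patch.
    std::cout << ConvOutputSize(10, 3, 1, 0, 3) << std::endl; // prints 4
}
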
diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index 5f66d2ec85..98e5090e27 100644
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -816,16 +816,17 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
}
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
- typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
-LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
+LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const boost::multi_array<T, 4>& input,
- const boost::multi_array<T, 4>& kernel,
+ const boost::multi_array<T, 4>& originalInput,
+ const boost::multi_array<T, 4>& originalKernel,
const boost::multi_array<B, 1>& bias,
- const boost::multi_array<T, 4>& outputExpected,
+ const boost::multi_array<T, 4>& originalOutputExpected,
float qScale,
int32_t qOffset,
+ const armnn::DataLayout layout = armnn::DataLayout::NCHW,
uint32_t padLeft = 0,
uint32_t padTop = 0,
uint32_t padRight = 0,
@@ -835,30 +836,44 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
uint32_t dilationX = 1,
uint32_t dilationY = 1)
{
- unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
- unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
- unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
- unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]);
+ unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
+ unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
+ unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
+ unsigned int inputNum = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
- unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
- unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
- unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
- unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
+ unsigned int outputHeight = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+ unsigned int outputWidth = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+ unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+ unsigned int outputNum = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
- unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
- unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
- unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+ unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+ unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+ unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+ unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+
+ bool biasEnabled = bias.size() > 0;
+
+ // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
+ BOOST_ASSERT(inputNum == 1);
+ BOOST_ASSERT(outputNum == 1);
+
+ // If a bias is used, its size must equal the number of output channels.
+ BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+
+
+ // Note these tensors will use two (identical) batches.
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
+
+ // For depthwise convolution the kernel is always in NCHW layout, regardless of the input/output data layout.
+ armnn::TensorInfo kernelDesc({kernelDepthMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
- // Creates the tensors.
- armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
- armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
- ArmnnType);
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
- if (armnn::IsQuantizedType<T>())
+ if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
inputTensorInfo.SetQuantizationOffset(qOffset);
@@ -870,45 +885,87 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
biasDesc.SetQuantizationOffset(0);
}
- // Construct the input data.
+ LayerTestResult<T, 4> ret(outputTensorInfo);
+
+ // Construct input data
+ std::vector<T> input;
+ input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
std::vector<T> inputData;
- inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
+ inputData.insert(inputData.end(), input.begin(), input.end());
+ inputData.insert(inputData.end(), input.begin(), input.end());
+
+ // At this point, permute the input data if the NHWC layout was requested.
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (layout == armnn::DataLayout::NHWC)
+ {
+ std::vector<T> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
+ inputData = tmp;
+ }
+
auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
- // Construct the output data, with bias applied, as appropriate.
+ std::vector<T> output;
+ output.assign(originalOutputExpected.data(),
+ originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
+
+ // Apply bias to output data if it is enabled.
+ if(biasEnabled)
+ {
+ std::vector<T> biasV;
+ biasV.assign(bias.data(), bias.data() + outputChannels);
+ ApplyBias(output, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+ biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
+ outputWidth, outputHeight);
+ }
+
+ // Construct expected output data
std::vector<T> outputData;
- outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
+ outputData.insert(outputData.end(), output.begin(), output.end());
+ outputData.insert(outputData.end(), output.begin(), output.end());
- LayerTestResult<T, 4> ret(outputTensorInfo);
+ // Likewise, permute the expected output if the NHWC layout was requested.
+ if (layout == armnn::DataLayout::NHWC)
+ {
+ std::vector<T> tmp(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
+ outputData = tmp;
+ }
ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ armnn::DepthwiseConvolution2dQueueDescriptor data;
+ armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+ armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+ boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
- armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+ if(biasEnabled)
+ {
+ AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+ }
+
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- armnn::DepthwiseConvolution2dQueueDescriptor data;
data.m_Weight = &weightsTensor;
- data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
+ data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
data.m_Parameters.m_StrideX = strideX;
data.m_Parameters.m_StrideY = strideY;
data.m_Parameters.m_PadLeft = padLeft;
data.m_Parameters.m_PadRight = padRight;
data.m_Parameters.m_PadTop = padTop;
data.m_Parameters.m_PadBottom = padBottom;
- data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+ data.m_Parameters.m_BiasEnabled = biasEnabled;
+ data.m_Parameters.m_DataLayout = layout;
data.m_Parameters.m_DilationX = dilationX;
data.m_Parameters.m_DilationY = dilationY;
- armnn::WorkloadInfo info;
- AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
-
inputHandle->Allocate();
outputHandle->Allocate();
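
The reworked DepthwiseConvolution2dTestImpl above builds its reference data in NCHW and permutes it when NHWC is requested, using the mapping { 0, 3, 1, 2 }. A minimal sketch of the permutation convention this assumes, where mappings[i] gives the destination dimension of source dimension i; the helper below is illustrative, not armnn API:

#include <array>
#include <cstddef>
#include <iostream>

// Illustrative helper (not armnn code): with mappings[i] naming the destination
// dimension of source dimension i, { 0, 3, 1, 2 } sends NCHW -> NHWC
// (N stays at 0, C moves to 3, H to 1, W to 2).
std::array<unsigned int, 4> PermuteShape(const std::array<unsigned int, 4>& srcShape,
                                         const std::array<unsigned int, 4>& mappings)
{
    std::array<unsigned int, 4> dstShape{};
    for (size_t i = 0; i < 4; ++i)
    {
        dstShape[mappings[i]] = srcShape[i];
    }
    return dstShape;
}

int main()
{
    const std::array<unsigned int, 4> nchw = { 2, 16, 5, 5 };      // N, C, H, W
    const std::array<unsigned int, 4> nchwToNhwc = { 0, 3, 1, 2 };
    for (auto d : PermuteShape(nchw, nchwToNhwc)) { std::cout << d << ' '; }
    std::cout << std::endl; // prints "2 5 5 16", i.e. N, H, W, C
}
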
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 45791e50f2..d9ae546739 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -826,7 +826,7 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
12., 10., 10., 10.,
12., 10., 10., 10.,
12., 10., 10., 10.,
- 6., 4., 4., 4.
+ 6., 4., 4., 4.
};
return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
@@ -899,7 +899,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
// Use a single-batch 2-channel 5x5 image as input.
armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
+ {
0, 1, 2, 3, 4,
5, 6, 7, 8, 9,
10, 11, 12, 13, 14,
@@ -916,7 +917,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
// Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
- QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
+ {
32, 31, 30, 29,
28, 27, 26, 25,
24, 23, 22, 21,
@@ -932,12 +934,14 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
// Calculated using the python tensorflow library with strideX=1, strideY=1.
armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
- QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+ {
1062, 1580, 1850, 1530, 1117,
2140, 3108, 3500, 2842, 2042,
3580, 5068, 5460, 4342, 3062,
3618, 5072, 5390, 4248, 2971,
3074, 4282, 4510, 3533, 2457,
+
1550, 2284, 2362, 1955, 1428,
2910, 4206, 4342, 3528, 2536,
3390, 4886, 5022, 4068, 2916,
@@ -972,43 +976,29 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
int32_t qOffset,
bool biasEnabled)
{
- armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
+ auto layout = armnn::DataLayout::NHWC;
+
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
- 0, 25,
- 1, 26,
- 2, 27,
- 3, 28,
- 4, 29,
-
- 5, 30,
- 6, 31,
- 7, 32,
- 8, 33,
- 9, 34,
-
- 10, 35,
- 11, 36,
- 12, 37,
- 13, 38,
- 14, 39,
-
- 15, 40,
- 16, 41,
- 17, 42,
- 18, 43,
- 19, 44,
-
- 20, 45,
- 21, 46,
- 22, 47,
- 23, 48,
- 24, 49
+ QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
+ {
+ 0, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24,
+
+ 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49
})));
armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
- QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
+ {
32, 31, 30, 29,
28, 27, 26, 25,
24, 23, 22, 21,
@@ -1020,41 +1010,24 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
4, 3, 2, 1
})));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
- QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
- 1062, 1550,
- 1580, 2284,
- 1850, 2362,
- 1530, 1955,
- 1117, 1428,
-
- 2140, 2910,
- 3108, 4206,
- 3500, 4342,
- 2842, 3528,
- 2042, 2536,
-
- 3580, 3390,
- 5068, 4886,
- 5460, 5022,
- 4342, 4068,
- 3062, 2916,
-
- 3618, 3566,
- 5072, 5056,
- 5390, 5182,
- 4248, 4133,
- 2971, 2922,
-
- 3074, 3100,
- 4282, 4352,
- 4510, 4452,
- 3533, 3517,
- 2457, 2465
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+ {
+ 1062, 1580, 1850, 1530, 1117,
+ 2140, 3108, 3500, 2842, 2042,
+ 3580, 5068, 5460, 4342, 3062,
+ 3618, 5072, 5390, 4248, 2971,
+ 3074, 4282, 4510, 3533, 2457,
+
+ 1550, 2284, 2362, 1955, 1428,
+ 2910, 4206, 4342, 3528, 2536,
+ 3390, 4886, 5022, 4068, 2916,
+ 3566, 5056, 5182, 4133, 2922,
+ 3100, 4352, 4452, 3517, 2465
})));
- return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
+ return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
input,
@@ -1063,6 +1036,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
expectedOutput,
qScale,
qOffset,
+ layout,
1, // Padding left.
1, // Padding top.
2, // Padding right.
@@ -1080,9 +1054,12 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
int32_t qOffset,
bool biasEnabled)
{
- armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
+ auto layout = armnn::DataLayout::NHWC;
+
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
- QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
+ {
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -1096,7 +1073,8 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
- QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
+ {
1, 2, 3,
4, 5, 6,
7, 8, 9
@@ -1112,15 +1090,16 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
uint32_t dilationY = 3;
// Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
- armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
- QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
+ QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+ {
5, 5, 5,
5, 5, 5,
5, 5, 5
})));
- return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
+ return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
input,
@@ -1129,6 +1108,7 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
expectedOutput,
qScale,
qOffset,
+ layout,
padLeft,
padTop,
padRight,
@@ -1139,6 +1119,269 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
dilationY);
}
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const std::vector<float>& inputNoQuantizedValues,
+ armnn::TensorInfo& inputTensorInfo,
+ const std::vector<float>& kernelNoQuantizedValues,
+ armnn::TensorInfo& kernelTensorInfo,
+ const std::vector<float>& outputExpectedNoQuantizedValues,
+ armnn::TensorInfo& outputTensorInfo,
+ uint32_t dilationX,
+ uint32_t dilationY,
+ armnn::DataLayout layout = armnn::DataLayout::NCHW,
+ bool biasEnabled = false)
+{
+ float qScale;
+ int32_t qOffset;
+ switch (ArmnnType)
+ {
+ case armnn::DataType::QuantisedAsymm8:
+ {
+ qScale = 0.1f;
+ qOffset = 128;
+ break;
+ }
+ case armnn::DataType::QuantisedSymm16:
+ {
+ qScale = 0.1f;
+ qOffset = 0;
+ break;
+ }
+ case armnn::DataType::Float32:
+ default:
+ {
+ qScale = 0.f;
+ qOffset = 0;
+ break;
+ }
+ }
+
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ kernelTensorInfo.SetQuantizationScale(qScale);
+ kernelTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo,
+ std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputNoQuantizedValues)));
+ auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
+ std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
+ kernelTensorInfo.GetQuantizationOffset(),
+ kernelNoQuantizedValues)));
+ auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
+ std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset(),
+ outputExpectedNoQuantizedValues)));
+
+ uint32_t padLeft = 0;
+ uint32_t padTop = 0;
+ uint32_t padRight = 0;
+ uint32_t padBottom = 0;
+ uint32_t strideX = 1;
+ uint32_t strideY = 1;
+
+ return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout,
+ padLeft,
+ padTop,
+ padRight,
+ padBottom,
+ strideX,
+ strideY,
+ dilationX,
+ dilationY);
+}
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
+LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout)
+{
+ armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
+ std::vector<float> inputNoQuantizedValues =
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
+ std::vector<float> kernelNoQuantizedValues =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ };
+
+ // Since the dilation rate is 3, the effective kernel size grows to 7x7,
+ // so the output will be 4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
+ std::vector<float> outputExpectedNoQuantizedValues =
+ {
+ 6., 5., 5., 5.,
+ 6., 5., 5., 5.,
+ 6., 5., 5., 5.,
+ 3., 2., 2., 2.
+ };
+
+ return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ inputNoQuantizedValues,
+ inputTensorInfo,
+ kernelNoQuantizedValues,
+ kernelTensorInfo,
+ outputExpectedNoQuantizedValues,
+ outputTensorInfo,
+ 3,
+ 3,
+ layout,
+ biasEnabled);
+}
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
+LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout)
+{
+ armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
+ std::vector<float> inputNoQuantizedValues =
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
+ std::vector<float> kernelNoQuantizedValues =
+ {
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ };
+
+ // Since the dilation rate is 3, the effective kernel size grows to 7x7,
+ // so the output will be 2x4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4, over 2 channels
+ armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
+ std::vector<float> outputExpectedNoQuantizedValues =
+ {
+ 6., 5., 5., 5.,
+ 6., 5., 5., 5.,
+ 6., 5., 5., 5.,
+ 3., 2., 2., 2.,
+
+ 6., 5., 5., 5.,
+ 6., 5., 5., 5.,
+ 6., 5., 5., 5.,
+ 3., 2., 2., 2.
+ };
+
+ return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ inputNoQuantizedValues,
+ inputTensorInfo,
+ kernelNoQuantizedValues,
+ kernelTensorInfo,
+ outputExpectedNoQuantizedValues,
+ outputTensorInfo,
+ 3,
+ 3,
+ layout,
+ biasEnabled);
+}
+
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
LayerTestResult<float, 4> DepthwiseConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1203,11 +1446,11 @@ LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory,
- memoryManager,
- 0.f,
- 0,
- false);
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ false);
}
LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
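
The dilation test common above selects per-type quantization parameters (for example scale 0.1 and offset 128 for QuantisedAsymm8) and quantizes the float reference values. A minimal sketch of the affine quantization this assumes, q = clamp(round(v / scale) + offset, 0, 255); the helper is illustrative and not the armnn QuantizedVector implementation:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Illustrative helper (not armnn code): affine quantization of float values
// to unsigned 8-bit, using the scale/offset pattern chosen in the switch above.
std::vector<uint8_t> QuantizeAsymm8(const std::vector<float>& values,
                                    float scale, int32_t offset)
{
    std::vector<uint8_t> quantized;
    quantized.reserve(values.size());
    for (float v : values)
    {
        const int32_t q = static_cast<int32_t>(std::round(v / scale)) + offset;
        quantized.push_back(static_cast<uint8_t>(std::min(255, std::max(0, q))));
    }
    return quantized;
}

// Usage: QuantizeAsymm8({0.f, 1.f, -1.f}, 0.1f, 128) yields {128, 138, 118}.
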
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index e0b0273d9d..25ccfa09f0 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -127,6 +127,20 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
bool biasEnabled,
const armnn::DataLayout layout);
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -144,8 +158,9 @@ LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
bool biasEnabled,
const armnn::DataLayout layout);
-LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
armnn::IWorkloadFactory& workloadFactory,
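
The header above only declares the new dilation test templates; their definitions live in LayerTests.cpp and are made linkable through the explicit template instantiations shown earlier. A minimal sketch of that declare-in-header, define-and-instantiate-in-cpp pattern, with hypothetical names:

// mytests.hpp -- declaration only.
template<typename T>
T Twice(T value);

// mytests.cpp -- definition plus explicit instantiations. Without these,
// callers in other translation units would fail to link, since the
// definition lives only in this .cpp file.
template<typename T>
T Twice(T value) { return value + value; }

template float Twice<float>(float);
template int   Twice<int>(int);
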
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index ccb1dc2d5d..cb9ee4b5a0 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -138,9 +138,68 @@ ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8Nhwc,
DepthwiseConvolution2dUint8Test,
false,
armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
ARMNN_AUTO_TEST_CASE(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3,
+ DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Nhwc,
+ DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>,
+ false,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
+ DepthwiseConvolution2d3x3Dilation3x3Test
+ <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
+ DepthwiseConvolution2d3x3Dilation3x3Test
+ <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int16,
+ DepthwiseConvolution2d3x3Dilation3x3Test
+ <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
+ DepthwiseConvolution2d3x3Dilation3x3Test
+ <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test
+ <armnn::DataType::Float32, armnn::DataType::Float32>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>,
+ false,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test
+ <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test
+ <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test
+ <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test
+ <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+ false,
+ armnn::DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, true, armnn::DataLayout::NCHW)
@@ -173,7 +232,6 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetricNhwc,
ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, false, armnn::DataLayout::NHWC)
-
// Pooling
//MaxPooling
ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2, SimpleMaxPooling2dSize2x2Stride2x2Test, false)