author    Jan Eilers <jan.eilers@arm.com>  2021-06-02 12:01:25 +0100
committer Jan Eilers <jan.eilers@arm.com>  2021-06-16 11:31:42 +0000
commit    53ef79504b4c881c572735393c2eede5fa556c46 (patch)
tree      f6e0cd27c4d03075fa154074c5b12d7c8c3149f7 /src/backends/neon/test/NeonLayerTests.cpp
parent    77fe76bfa8cb798943821d1f3e432c228e1cdee3 (diff)
IVGCVSW-5826 Change weights layout for depthwise to [1,H,W,I*M]
* This change is necessary because tflite uses a [1,H,W,I*M] format and uses the I*M dimension for per-axis quantization. Our previous layout [M,I,H,W] can't handle the corresponding quantization scales.
* Updates Onnx-, TfLiteParser and TfliteDelegate
* Updates the CpuRef, CpuAcc and GpuAcc backends
* Adjusts unit tests
* Adds test to ensure models with old layout can still be read and executed
* Adds conversion function from the new layout [1,H,W,I*M] to the previous layout [M,I,H,W] which can be used by backend developers

!android-nn-driver:5553

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ifef23368b8c3702cf315a5838d214f7dc13c0152
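The commit message above mentions a conversion function from the new [1,H,W,I*M] weights layout back to the previous [M,I,H,W] layout. Below is a minimal, self-contained sketch of that rearrangement; the function name is hypothetical (not the actual Arm NN API), and it assumes the I*M dimension is indexed as i*M + m, following the TfLite depthwise convention.

// Sketch only: rearranges depthwise weights from [1,H,W,I*M] to [M,I,H,W].
// Assumes the channel index in the I*M dimension is i*M + m (TfLite convention).
#include <cstddef>
#include <vector>

std::vector<float> ConvertHWIMToMIHW(const std::vector<float>& src,
                                     size_t H, size_t W, size_t I, size_t M)
{
    std::vector<float> dst(M * I * H * W);
    for (size_t m = 0; m < M; ++m)
    {
        for (size_t i = 0; i < I; ++i)
        {
            for (size_t h = 0; h < H; ++h)
            {
                for (size_t w = 0; w < W; ++w)
                {
                    // src is [1,H,W,I*M]; the channel index is i*M + m
                    size_t srcIdx = (h * W + w) * (I * M) + (i * M + m);
                    // dst is [M,I,H,W]
                    size_t dstIdx = ((m * I + i) * H + h) * W + w;
                    dst[dstIdx] = src[srcIdx];
                }
            }
        }
    }
    return dst;
}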
Diffstat (limited to 'src/backends/neon/test/NeonLayerTests.cpp')
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp | 16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index edc8cb995c..62864f82dc 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -216,6 +216,11 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// Depthwise Convolution
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true, DataLayout::NCHW)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2d, DepthwiseConvolution2dTest, false, DataLayout::NCHW)
+
ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
@@ -291,16 +296,15 @@ TensorInfo CreateOutputTensorInfo(const TensorInfo& inputInfo,
unsigned int inHeight = inputShape[2];
unsigned int inBatchSize = inputShape[0];
- unsigned int filterWidth = filterShape[3];
+ unsigned int filterWidth = filterShape[2];
unsigned int readWidth = (inWidth + descriptor.m_PadLeft + descriptor.m_PadRight) - (filterWidth);
unsigned int outWidth = 1u + (readWidth / descriptor.m_StrideX);
- unsigned int filterHeight = filterShape[2];
+ unsigned int filterHeight = filterShape[1];
unsigned int readHeight = (inHeight + descriptor.m_PadTop + descriptor.m_PadBottom) - (filterHeight);
unsigned int outHeight = 1u + (readHeight / descriptor.m_StrideY);
- unsigned int depthMultiplier = filterShape[0];
- unsigned int outChannels = filterShape[1] * depthMultiplier;
+ unsigned int outChannels = filterShape[3];
unsigned int outBatchSize = inBatchSize;
TensorShape outputShape({outBatchSize, outChannels, outHeight, outWidth});
@@ -314,7 +318,7 @@ TEST_CASE("DepthwiseConv2dUtils")
TensorInfo inputInfo({1, 1, 10, 10 }, dataType);
TensorInfo outputInfo;
- TensorInfo weightsInfo3x3({ 1, 1, 3, 3 }, dataType);
+ TensorInfo weightsInfo3x3({ 1, 3, 3, 1 }, dataType); // [1,H,W,I*M]
TensorInfo biasesInfo;
DepthwiseConvolution2dDescriptor descriptor;
@@ -380,7 +384,7 @@ TEST_CASE("DepthwiseConv2dUtils")
weightsInfo1x1, biasesInfo));
// Supported shape 2x2
- TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, DataType::Float32);
+ TensorInfo weightsInfo2x2({ 1, 2, 2, 1 }, DataType::Float32);
descriptor = MakeDepthwiseConv2dDesc(1, 1);
outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType);
CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
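For reference, a standalone sketch of the output-shape arithmetic used by CreateOutputTensorInfo above, applied to the test's 1x1x10x10 NCHW input and a 3x3 filter expressed in the new [1,H,W,I*M] layout. The zero padding and unit strides are assumed values chosen for illustration, not taken from the test descriptor.

// Sketch only: reproduces the output-shape computation with the new filter layout.
#include <array>
#include <cstdio>

int main()
{
    std::array<unsigned int, 4> inputShape  = {1, 1, 10, 10}; // NCHW, as in the test
    std::array<unsigned int, 4> filterShape = {1, 3, 3, 1};   // [1,H,W,I*M]
    unsigned int padLeft = 0, padRight = 0, padTop = 0, padBottom = 0; // assumed
    unsigned int strideX = 1, strideY = 1;                             // assumed

    unsigned int filterHeight = filterShape[1];
    unsigned int filterWidth  = filterShape[2];
    unsigned int outChannels  = filterShape[3];

    unsigned int outWidth  = 1u + (inputShape[3] + padLeft + padRight  - filterWidth)  / strideX;
    unsigned int outHeight = 1u + (inputShape[2] + padTop  + padBottom - filterHeight) / strideY;

    // With the values above this prints: 1 1 8 8
    std::printf("%u %u %u %u\n", inputShape[0], outChannels, outHeight, outWidth);
    return 0;
}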