diff options
author | Jan Eilers <jan.eilers@arm.com> | 2021-06-02 12:01:25 +0100 |
---|---|---|
committer | Jan Eilers <jan.eilers@arm.com> | 2021-06-16 11:31:42 +0000 |
commit | 53ef79504b4c881c572735393c2eede5fa556c46 (patch) | |
tree | f6e0cd27c4d03075fa154074c5b12d7c8c3149f7 /src/armnn/test/optimizations | |
parent | 77fe76bfa8cb798943821d1f3e432c228e1cdee3 (diff) | |
download | armnn-53ef79504b4c881c572735393c2eede5fa556c46.tar.gz |
IVGCVSW-5826 Change weights layout for depthwise to [1,H,W,I*M]
* This change is necessary because tflite uses a [1,H,W,I*M] format
and uses the I*M dimension for per axis quantization. Our previous
layout [M,I,H,W] can't handle the corresponding quantization scales.
* Updates Onnx-, TfLiteParser and TfLiteDelegate
* Updates the CpuRef, CpuAcc and GpuAcc backends
* Adjusts unit tests
* Adds test to ensure models with old layout can still be read and
executed
* Adds conversion function to previous layout [1,H,W,I*M] --> [M,I,H,W]
which can be used by backend developers
!android-nn-driver:5553
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ifef23368b8c3702cf315a5838d214f7dc13c0152
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 2 | ||||
-rw-r--r-- | src/armnn/test/optimizations/FuseActivationTests.cpp | 6 | ||||
-rw-r--r-- | src/armnn/test/optimizations/FuseBatchNormTests.cpp | 12 |
3 files changed, 10 insertions, 10 deletions
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index 7b4ac4170f..11f09e80e0 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -687,7 +687,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp // avoided. The output tensors of each should match. const unsigned int inputShape[] = {1, 4, 4, 3}; // NHWCin const unsigned int paddedShape[] = {1, 6, 6, 3}; - const unsigned int weightsShape[] = {4, 3, 2, 2}; // MCinHW + const unsigned int weightsShape[] = {1, 2, 2, 12}; // 1HWCout const unsigned int outputShape[] = {1, 5, 5, 12}; // NHWCout std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f, diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp index 9e332136f6..35b5bbc2da 100644 --- a/src/armnn/test/optimizations/FuseActivationTests.cpp +++ b/src/armnn/test/optimizations/FuseActivationTests.cpp @@ -81,9 +81,9 @@ public: using LayerType = DepthwiseConvolution2dLayer; static const bool isElementWise = false; - static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // NHWCin - static TensorShape GetOutputShape() { return TensorShape( {1, 3, 3, 12}); } // NHWCout - static TensorShape GetWeightsShape() { return TensorShape( {4, 3, 2, 2}); } // MCinHW + static TensorShape GetInputShape() { return TensorShape( {1, 4, 4, 3}); } // [N,H,W,Cin] + static TensorShape GetOutputShape() { return TensorShape( {1, 3, 3, 12}); } // [N,H,W,Cout] + static TensorShape GetWeightsShape() { return TensorShape( {1, 2, 2, 12}); } // [1,H,W,Cout] constexpr static const unsigned int inputSize = 48; //batchIn * heightIn * widthIn * channelIn; constexpr static const unsigned int outputSize = 108; //batchOut * heightOut * widthOut * channelOut; diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp 
index 671f565054..20d2940b81 100644 --- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp +++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp @@ -90,12 +90,12 @@ INetworkPtr CreatNetwork(bool depthwise, bool preventFusing) if (depthwise) { - //M Cin H W - weightsDimensionSizes[0] = 4; - weightsDimensionSizes[1] = 3; + // [1, H, W, Cout] + weightsDimensionSizes[0] = 1; + weightsDimensionSizes[1] = 2; weightsDimensionSizes[2] = 2; - weightsDimensionSizes[3] = 2; - outputDimensionSizes[3] = weightsDimensionSizes[0] * weightsDimensionSizes[1]; + weightsDimensionSizes[3] = 12; + outputDimensionSizes[3] = weightsDimensionSizes[3]; } const unsigned int outputChannelSize[] = {outputDimensionSizes[3]}; // Cout @@ -295,7 +295,7 @@ TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat32Test") TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat16Test") { - FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float16>(true, 0.1f,armnn::Compute::CpuRef); + FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float16>(true, 0.2f,armnn::Compute::CpuRef); } #endif |