From 649dd9515ddf4bd00a0bff64d51dfd835a6c7b39 Mon Sep 17 00:00:00 2001
From: Nattapat Chaimanowong
Date: Tue, 22 Jan 2019 16:10:44 +0000
Subject: IVGCVSW-2467 Remove GetDataType function

Change-Id: I7359617a307b9abb4c30b3d5f2364dc6d0f828f0
---
 CMakeLists.txt                                      |    1 +
 include/armnn/TypesUtils.hpp                        |   46 +-
 src/armnn/CompatibleTypes.hpp                       |   44 +
 src/armnn/test/UtilsTests.cpp                       |   11 -
 src/armnnTfLiteParser/test/Activations.cpp          |    8 +-
 src/armnnTfLiteParser/test/Addition.cpp             |   10 +-
 src/armnnTfLiteParser/test/AvgPool2D.cpp            |    9 +-
 src/armnnTfLiteParser/test/Concatenation.cpp        |   97 +-
 src/armnnTfLiteParser/test/Conv2D.cpp               |    8 +-
 .../test/DepthwiseConvolution2D.cpp                 |    6 +-
 src/armnnTfLiteParser/test/FullyConnected.cpp       |    4 +-
 src/armnnTfLiteParser/test/MaxPool2D.cpp            |    9 +-
 src/armnnTfLiteParser/test/Mean.cpp                 |    5 +-
 src/armnnTfLiteParser/test/Multiplication.cpp       |   25 +-
 src/armnnTfLiteParser/test/Pad.cpp                  |   14 +-
 .../test/ParserFlatbuffersFixture.hpp               |   38 +-
 src/armnnTfLiteParser/test/Reshape.cpp              |   24 +-
 src/armnnTfLiteParser/test/Softmax.cpp              |    3 +-
 src/armnnTfLiteParser/test/Squeeze.cpp              |    4 +-
 src/armnnTfParser/TfParser.cpp                      |    2 +-
 src/armnnUtils/TensorUtils.cpp                      |   19 +
 src/armnnUtils/TensorUtils.hpp                      |   20 +-
 src/backends/backendsCommon/CpuTensorHandle.hpp     |    5 +-
 .../backendsCommon/test/ActivationTestImpl.hpp      |   81 +-
 .../backendsCommon/test/ArithmeticTestImpl.hpp      |   10 +-
 .../backendsCommon/test/BatchNormTestImpl.hpp       |   17 +-
 .../backendsCommon/test/Conv2dTestImpl.hpp          |  108 +-
 src/backends/backendsCommon/test/DebugTestImpl.hpp  |   24 +-
 .../backendsCommon/test/FullyConnectedTestImpl.hpp  |   11 +-
 src/backends/backendsCommon/test/LayerTests.cpp     | 1364 +++++++++++---------
 src/backends/backendsCommon/test/LayerTests.hpp     |   12 +
 src/backends/backendsCommon/test/LstmTestImpl.hpp   |   64 +-
 .../backendsCommon/test/MergerTestImpl.hpp          |   21 +-
 .../backendsCommon/test/Pooling2dTestImpl.hpp       |  177 +--
 .../backendsCommon/test/SoftmaxTestImpl.hpp         |   12 +-
 .../backendsCommon/test/SpaceToBatchNdTestImpl.hpp  |   40 +-
 .../backendsCommon/test/SplitterTestImpl.hpp        |   16 +-
 .../backendsCommon/test/StridedSliceTestImpl.hpp    |   55 +-
 src/backends/cl/test/ClEndToEndTests.cpp            |   12 +-
 src/backends/cl/test/ClLayerTests.cpp               |    8 +-
 src/backends/cl/test/OpenClTimerTest.cpp            |    8 +-
 src/backends/neon/test/NeonEndToEndTests.cpp        |   12 +-
 src/backends/neon/test/NeonLayerTests.cpp           |    8 +-
 src/backends/neon/test/NeonTimerTest.cpp            |    4 +-
 src/backends/reference/test/RefEndToEndTests.cpp    |   34 +-
 45 files changed, 1318 insertions(+), 1192 deletions(-)
 create mode 100644 src/armnn/CompatibleTypes.hpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 87d91b96b4..682e2cf688 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -255,6 +255,7 @@ list(APPEND armnn_sources
     src/armnn/layers/SubtractionLayer.cpp
     src/armnn/layers/SubtractionLayer.hpp
     src/armnn/BackendSettings.hpp
+    src/armnn/CompatibleTypes.hpp
     src/armnn/Descriptors.cpp
     src/armnn/DeviceSpec.hpp
     src/armnn/Exceptions.cpp
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 8c4ceb8d4f..3ed1dfbcb5 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -151,45 +151,6 @@ struct IsHalfType
     : std::integral_constant<bool, std::is_floating_point<T>::value && sizeof(T) == 2>
 {};
 
-template<typename T, typename U = T>
-struct GetDataTypeImpl;
-
-template<typename T>
-struct GetDataTypeImpl<T, std::enable_if_t<IsHalfType<T>::value, T>>
-{
-    static constexpr DataType Value = DataType::Float16;
-};
-
-template<>
-struct GetDataTypeImpl<float>
-{
-    static constexpr DataType Value = DataType::Float32;
-};
-
-template<>
-struct GetDataTypeImpl<uint8_t>
-{
-    static constexpr DataType Value = DataType::QuantisedAsymm8;
-};
-
-template<>
-struct GetDataTypeImpl<int32_t>
-{
-    static constexpr DataType Value = DataType::Signed32;
-};
-
-template<>
-struct GetDataTypeImpl<bool>
-{
-    static constexpr DataType Value = DataType::Boolean;
-};
-
-template<typename T>
-constexpr DataType GetDataType()
-{
-    return GetDataTypeImpl<T>::Value;
-}
-
 template<typename T>
 constexpr bool IsQuantizedType()
 {
@@ -257,16 +218,15 @@ inline float Dequantize(QuantizedType value, float scale, int32_t offset)
     return dequantized;
 }
 
-template<typename T>
+template<armnn::DataType DataType>
 void VerifyTensorInfoDataType(const armnn::TensorInfo & info)
 {
-    auto expectedType = armnn::GetDataType<T>();
-    if (info.GetDataType() != expectedType)
+    if (info.GetDataType() != DataType)
     {
         std::stringstream ss;
         ss << "Unexpected datatype:" << armnn::GetDataTypeName(info.GetDataType())
            << " for tensor:" << info.GetShape()
-           << ". The type expected to be: " << armnn::GetDataTypeName(expectedType);
+           << ". The type expected to be: " << armnn::GetDataTypeName(DataType);
         throw armnn::Exception(ss.str());
     }
 }
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
new file mode 100644
index 0000000000..2449876544
--- /dev/null
+++ b/src/armnn/CompatibleTypes.hpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/Types.hpp"
+#include "Half.hpp"
+
+namespace armnn
+{
+
+template<typename T>
+bool CompatibleTypes(DataType dataType)
+{
+    return false;
+}
+
+template<>
+inline bool CompatibleTypes<float>(DataType dataType)
+{
+    return dataType == DataType::Float32;
+}
+
+template<>
+inline bool CompatibleTypes<Half>(DataType dataType)
+{
+    return dataType == DataType::Float16;
+}
+
+template<>
+inline bool CompatibleTypes<uint8_t>(DataType dataType)
+{
+    return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
+}
+
+template<>
+inline bool CompatibleTypes<int32_t>(DataType dataType)
+{
+    return dataType == DataType::Signed32;
+}
+
+} //namespace armnn
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 9933137edc..c81a4b67b6 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -23,14 +23,6 @@ BOOST_AUTO_TEST_CASE(DataTypeSize)
     BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
 }
 
-BOOST_AUTO_TEST_CASE(GetDataTypeTest)
-{
-    BOOST_TEST((armnn::GetDataType<float>() == armnn::DataType::Float32));
-    BOOST_TEST((armnn::GetDataType<uint8_t>() == armnn::DataType::QuantisedAsymm8));
-    BOOST_TEST((armnn::GetDataType<int32_t>() == armnn::DataType::Signed32));
-    BOOST_TEST((armnn::GetDataType<bool>() == armnn::DataType::Boolean));
-}
-
 BOOST_AUTO_TEST_CASE(PermuteDescriptorWithTooManyMappings)
 {
     BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u }), armnn::InvalidArgumentException);
@@ -81,9 +73,6 @@ BOOST_AUTO_TEST_CASE(HalfType)
     constexpr bool isHalfType = std::is_same<half_float::half, armnn::Half>::value;
     BOOST_CHECK(isHalfType);
 
-    armnn::DataType dt = armnn::GetDataType<armnn::Half>();
-    BOOST_CHECK(dt == armnn::DataType::Float16);
-
     //Test utility functions return correct size
     BOOST_CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
diff --git a/src/armnnTfLiteParser/test/Activations.cpp b/src/armnnTfLiteParser/test/Activations.cpp
index a30d46408c..534ae4cb73 100644
--- a/src/armnnTfLiteParser/test/Activations.cpp
+++ b/src/armnnTfLiteParser/test/Activations.cpp
@@ -70,8 +70,8 @@ struct ReLuFixture : ActivationFixture
 };
 BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
{
-    RunTest<2, float>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
-                      { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
+    RunTest<2, 
armnn::DataType::Float32>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f }, + { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f }); } struct ReLu6Fixture : ActivationFixture @@ -80,8 +80,8 @@ struct ReLu6Fixture : ActivationFixture }; BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture) { - RunTest<2, float>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f }, - { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f }); + RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f }, + { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f }); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp index 53a0c40337..94389d3134 100644 --- a/src/armnnTfLiteParser/test/Addition.cpp +++ b/src/armnnTfLiteParser/test/Addition.cpp @@ -97,11 +97,11 @@ struct SimpleAddFixture : AddFixture BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture) { - RunTest<2, uint8_t>(0, - {{"inputTensor1", { 0, 1, 2, 3 }}, - {"inputTensor2", { 4, 5, 6, 7 }}}, - {{"outputTensor", { 4, 6, 8, 10 }}}); + RunTest<2, armnn::DataType::QuantisedAsymm8>( + 0, + {{"inputTensor1", { 0, 1, 2, 3 }}, + {"inputTensor2", { 4, 5, 6, 7 }}}, + {{"outputTensor", { 4, 6, 8, 10 }}}); } BOOST_AUTO_TEST_SUITE_END() - diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp index 2fac9079c8..a39c088d44 100644 --- a/src/armnnTfLiteParser/test/AvgPool2D.cpp +++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp @@ -98,22 +98,23 @@ struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput) { - RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 }); + RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 }); } BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput) { - RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f }); + RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f }); } BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput) { - RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 }); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 }); } BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput) { - BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception); + BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp index 8629efe3d7..bb5aebf39c 100644 --- a/src/armnnTfLiteParser/test/Concatenation.cpp +++ b/src/armnnTfLiteParser/test/Concatenation.cpp @@ -100,10 +100,11 @@ struct ConcatenationFixtureNegativeDim : ConcatenationFixture BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim) { - RunTest<4, uint8_t>(0, - {{"inputTensor1", { 0, 1, 2, 3 }}, - {"inputTensor2", { 4, 5, 6, 7 }}}, - {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}}); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, + {{"inputTensor1", { 0, 1, 2, 3 }}, + {"inputTensor2", { 4, 5, 6, 7 }}}, + {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}}); } struct ConcatenationFixtureNCHW : ConcatenationFixture @@ -113,10 +114,11 @@ struct ConcatenationFixtureNCHW : ConcatenationFixture BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW) { - 
RunTest<4, uint8_t>(0, - {{"inputTensor1", { 0, 1, 2, 3 }}, - {"inputTensor2", { 4, 5, 6, 7 }}}, - {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}}); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, + {{"inputTensor1", { 0, 1, 2, 3 }}, + {"inputTensor2", { 4, 5, 6, 7 }}}, + {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}}); } struct ConcatenationFixtureNHWC : ConcatenationFixture @@ -126,10 +128,11 @@ struct ConcatenationFixtureNHWC : ConcatenationFixture BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC) { - RunTest<4, uint8_t>(0, - {{"inputTensor1", { 0, 1, 2, 3 }}, - {"inputTensor2", { 4, 5, 6, 7 }}}, - {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}}); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, + {{"inputTensor1", { 0, 1, 2, 3 }}, + {"inputTensor2", { 4, 5, 6, 7 }}}, + {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}}); } struct ConcatenationFixtureDim1 : ConcatenationFixture @@ -139,15 +142,16 @@ struct ConcatenationFixtureDim1 : ConcatenationFixture BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1) { - RunTest<4, uint8_t>(0, - { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, - { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } }, - { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, - 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } }); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, + { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } }, + { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } }, + { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } }); } struct ConcatenationFixtureDim3 : ConcatenationFixture @@ -157,31 +161,32 @@ struct ConcatenationFixtureDim3 : ConcatenationFixture BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3) { - RunTest<4, uint8_t>(0, - { { "inputTensor1", { 0, 1, 2, 3, - 4, 5, 6, 7, - 8, 9, 10, 11, - 12, 13, 14, 15, - 16, 17, 18, 19, - 20, 21, 22, 23 } }, - { "inputTensor2", { 50, 51, 52, 53, - 54, 55, 56, 57, - 58, 59, 60, 61, - 62, 63, 64, 65, - 66, 67, 68, 69, - 70, 71, 72, 73 } } }, - { { "outputTensor", { 0, 1, 2, 3, - 50, 51, 52, 53, - 4, 5, 6, 7, - 54, 55, 56, 57, - 8, 9, 10, 11, - 58, 59, 60, 61, - 12, 13, 14, 15, - 62, 63, 64, 65, - 16, 17, 18, 19, - 66, 67, 68, 69, - 20, 21, 22, 23, - 70, 71, 72, 73 } } }); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, + { { "inputTensor1", { 0, 1, 2, 3, + 4, 5, 6, 7, + 8, 9, 10, 11, + 12, 13, 14, 15, + 16, 17, 18, 19, + 20, 21, 22, 23 } }, + { "inputTensor2", { 50, 51, 52, 53, + 54, 55, 56, 57, + 58, 59, 60, 61, + 62, 63, 64, 65, + 66, 67, 68, 69, + 70, 71, 72, 73 } } }, + { { "outputTensor", { 0, 1, 2, 3, + 50, 51, 52, 53, + 4, 5, 6, 7, + 54, 55, 56, 57, + 8, 9, 10, 11, + 58, 59, 60, 61, + 12, 13, 14, 15, + 62, 63, 64, 65, + 16, 17, 18, 19, + 66, 67, 68, 69, + 20, 21, 22, 23, + 70, 71, 72, 73 } } }); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp index 79bef733c9..38c6675ddb 100644 --- a/src/armnnTfLiteParser/test/Conv2D.cpp +++ 
b/src/armnnTfLiteParser/test/Conv2D.cpp @@ -89,7 +89,7 @@ struct SimpleConv2DFixture : public ParserFlatbuffersFixture BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture ) { - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 1, 2, 3, @@ -219,7 +219,7 @@ struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture ) { - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 1, 2, @@ -290,7 +290,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture uint8_t outZero = 20; uint8_t fz = 4; // filter zero point - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 1, 2, @@ -331,7 +331,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixtu { uint8_t relu6Min = 6 / 2; // divide by output scale - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 1, 2, diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp index e8262f8313..c0767801b3 100644 --- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp +++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp @@ -133,7 +133,7 @@ struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture) { - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 0, 1, 2, 3, 4, 5, @@ -160,7 +160,7 @@ struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture) { - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 0, 1, 2, 3, 4, 5, @@ -185,7 +185,7 @@ struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture) { - RunTest<4, uint8_t>( + RunTest<4, armnn::DataType::QuantisedAsymm8>( 0, { 0, 1, 2, 3, 4, 5, diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp index 14ca57c2ab..7ee64a476e 100644 --- a/src/armnnTfLiteParser/test/FullyConnected.cpp +++ b/src/armnnTfLiteParser/test/FullyConnected.cpp @@ -125,7 +125,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture) { - RunTest<2, uint8_t>( + RunTest<2, armnn::DataType::QuantisedAsymm8>( 0, { 10, 20, 30, 40 }, { 400/2 }); @@ -145,7 +145,7 @@ struct FullyConnectedWithBiasFixture : FullyConnectedFixture BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture) { - RunTest<2, uint8_t>( + RunTest<2, armnn::DataType::QuantisedAsymm8>( 0, { 10, 20, 30, 40 }, { (400+10)/2 }); diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp index 06bf7806cc..759fc37ccd 100644 --- a/src/armnnTfLiteParser/test/MaxPool2D.cpp +++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp @@ -98,22 +98,23 @@ struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput) { - RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 }); + RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 }); } BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput) { - RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 
2.0f }, { 5.0f }); + RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f }); } BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput) { - RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 }); + RunTest<4, armnn::DataType::QuantisedAsymm8>( + 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 }); } BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput) { - BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception); + BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnTfLiteParser/test/Mean.cpp b/src/armnnTfLiteParser/test/Mean.cpp index d7cb2a20f0..3f0fdf14d0 100644 --- a/src/armnnTfLiteParser/test/Mean.cpp +++ b/src/armnnTfLiteParser/test/Mean.cpp @@ -91,9 +91,8 @@ struct SimpleMeanNoReduceFixture : public MeanNoReduceFixture BOOST_FIXTURE_TEST_CASE(ParseMeanNoReduce, SimpleMeanNoReduceFixture) { - RunTest<2, float>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } }, - {{ "outputTensor", { 1.5f } } }); + RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } }, + {{ "outputTensor", { 1.5f } } }); } BOOST_AUTO_TEST_SUITE_END() - diff --git a/src/armnnTfLiteParser/test/Multiplication.cpp b/src/armnnTfLiteParser/test/Multiplication.cpp index 802799c2b4..f7e2edd546 100644 --- a/src/armnnTfLiteParser/test/Multiplication.cpp +++ b/src/armnnTfLiteParser/test/Multiplication.cpp @@ -94,19 +94,18 @@ struct SimpleMultiplicationFixture : public MultiplicationFixture BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture) { - RunTest<4, float>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f, - 3.0f, 4.0f, 5.0f, - 6.0f, 7.0f, 8.0f, - 9.0f, 10.0f, 11.0f } }, - { "inputTensor2", { 1.0f, 1.0f, 1.0f, - 5.0f, 5.0f, 5.0f, - 1.0f, 1.0f, 1.0f, - 5.0f, 5.0f, 5.0f} } }, - {{ "outputTensor", { 0.0f, 1.0f, 2.0f, - 15.0f, 20.0f, 25.0f, - 6.0f, 7.0f, 8.0f, - 45.0f, 50.0f, 55.0f } } }); + RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f, + 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, + 9.0f, 10.0f, 11.0f } }, + { "inputTensor2", { 1.0f, 1.0f, 1.0f, + 5.0f, 5.0f, 5.0f, + 1.0f, 1.0f, 1.0f, + 5.0f, 5.0f, 5.0f} } }, + {{ "outputTensor", { 0.0f, 1.0f, 2.0f, + 15.0f, 20.0f, 25.0f, + 6.0f, 7.0f, 8.0f, + 45.0f, 50.0f, 55.0f } } }); } BOOST_AUTO_TEST_SUITE_END() - diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp index 09b744a7ce..bdc8478ca2 100644 --- a/src/armnnTfLiteParser/test/Pad.cpp +++ b/src/armnnTfLiteParser/test/Pad.cpp @@ -92,13 +92,13 @@ struct SimplePadFixture : public PadFixture BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture) { - RunTest<2, float>(0, - {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}}, - {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}}); + RunTest<2, armnn::DataType::Float32> + (0, + {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}}, + {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}}); } BOOST_AUTO_TEST_SUITE_END() - diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp 
b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index b372a604f3..8d0ee01aa9 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -14,6 +14,7 @@
 #include 
 
 #include "test/TensorHelpers.hpp"
+#include "TypeUtils.hpp"
 #include "armnnTfLiteParser/ITfLiteParser.hpp"
 
 #include 
 
@@ -116,14 +117,18 @@ struct ParserFlatbuffersFixture
 
     /// Executes the network with the given input tensor and checks the result against the given output tensor.
     /// This overload assumes the network has a single input and a single output.
-    template <std::size_t NumOutputDimensions, typename DataType>
+    template <std::size_t NumOutputDimensions,
+              armnn::DataType ArmnnType,
+              typename DataType = armnn::ResolveType<ArmnnType>>
     void RunTest(size_t subgraphId,
                  const std::vector<DataType>& inputData,
                  const std::vector<DataType>& expectedOutputData);
 
     /// Executes the network with the given input tensors and checks the results against the given output tensors.
     /// This overload supports multiple inputs and multiple outputs, identified by name.
-    template <std::size_t NumOutputDimensions, typename DataType>
+    template <std::size_t NumOutputDimensions,
+              armnn::DataType ArmnnType,
+              typename DataType = armnn::ResolveType<ArmnnType>>
     void RunTest(size_t subgraphId,
                  const std::map<std::string, std::vector<DataType>>& inputData,
                  const std::map<std::string, std::vector<DataType>>& expectedOutputData);
 
@@ -152,21 +157,24 @@ struct ParserFlatbuffersFixture
     }
 };
 
-template <std::size_t NumOutputDimensions, typename DataType>
+template <std::size_t NumOutputDimensions,
+          armnn::DataType ArmnnType,
+          typename DataType>
 void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
                                        const std::vector<DataType>& inputData,
                                        const std::vector<DataType>& expectedOutputData)
 {
-    RunTest<NumOutputDimensions, DataType>(subgraphId,
-                                           { { m_SingleInputName, inputData } },
-                                           { { m_SingleOutputName, expectedOutputData } });
+    RunTest<NumOutputDimensions, ArmnnType>(subgraphId,
+                                            { { m_SingleInputName, inputData } },
+                                            { { m_SingleOutputName, expectedOutputData } });
 }
 
-template <std::size_t NumOutputDimensions, typename DataType>
-void
-ParserFlatbuffersFixture::RunTest(size_t subgraphId,
-                                  const std::map<std::string, std::vector<DataType>>& inputData,
-                                  const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+template <std::size_t NumOutputDimensions,
+          armnn::DataType ArmnnType,
+          typename DataType>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+                                       const std::map<std::string, std::vector<DataType>>& inputData,
+                                       const std::map<std::string, std::vector<DataType>>& expectedOutputData)
 {
     using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
 
@@ -175,7 +183,7 @@ ParserFlatbuffersFixture::RunTest(size_t subgraphId,
     for (auto&& it : inputData)
     {
         BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
-        armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+        armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
         inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
     }
 
@@ -185,7 +193,7 @@ ParserFlatbuffersFixture::RunTest(size_t subgraphId,
     for (auto&& it : expectedOutputData)
     {
         BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
-        armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+        armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
         outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
         outputTensors.push_back(
                 { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index ae5a09a711..ef4b761945 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -86,9 +86,9 @@ struct ReshapeFixtureWithReshapeDims : ReshapeFixture
 BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<2, uint8_t>(0,
-                        { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
-                        { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+    RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({3,3})));
 }
 
@@ 
-101,9 +101,9 @@ struct ReshapeFixtureWithReshapeDimsFlatten : ReshapeFixture BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten) { SetupSingleInputSingleOutput("inputTensor", "outputTensor"); - RunTest<2, uint8_t>(0, - { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - { 1, 2, 3, 4, 5, 6, 7, 8, 9 }); + RunTest<2, armnn::DataType::QuantisedAsymm8>(0, + { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + { 1, 2, 3, 4, 5, 6, 7, 8, 9 }); BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape() == armnn::TensorShape({1,9}))); } @@ -116,9 +116,9 @@ struct ReshapeFixtureWithReshapeDimsFlattenTwoDims : ReshapeFixture BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims) { SetupSingleInputSingleOutput("inputTensor", "outputTensor"); - RunTest<2, uint8_t>(0, - { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }, - { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }); + RunTest<2, armnn::DataType::QuantisedAsymm8>(0, + { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }, + { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }); BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape() == armnn::TensorShape({2,9}))); } @@ -131,9 +131,9 @@ struct ReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim) { SetupSingleInputSingleOutput("inputTensor", "outputTensor"); - RunTest<3, uint8_t>(0, - { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }, - { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }); + RunTest<3, armnn::DataType::QuantisedAsymm8>(0, + { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }, + { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }); BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape() == armnn::TensorShape({2,3,3}))); } diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp index 957e61b944..dacd946352 100644 --- a/src/armnnTfLiteParser/test/Softmax.cpp +++ b/src/armnnTfLiteParser/test/Softmax.cpp @@ -71,8 +71,7 @@ struct SoftmaxFixture : public ParserFlatbuffersFixture BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture) { - RunTest<2, uint8_t>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 }); + RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 }); } BOOST_AUTO_TEST_SUITE_END() - diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp index 5ee74243c4..7f6fb276fc 100644 --- a/src/armnnTfLiteParser/test/Squeeze.cpp +++ b/src/armnnTfLiteParser/test/Squeeze.cpp @@ -85,7 +85,7 @@ struct SqueezeFixtureWithSqueezeDims : SqueezeFixture BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims) { SetupSingleInputSingleOutput("inputTensor", "outputTensor"); - RunTest<3, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 }); + RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 }); BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape() == armnn::TensorShape({2,2,1}))); @@ -99,7 +99,7 @@ struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims) { SetupSingleInputSingleOutput("inputTensor", "outputTensor"); - RunTest<2, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 }); + RunTest<2, 
armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
 
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,2})));
 }
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 0087ef83bf..15a91d5275 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -866,7 +866,7 @@ public:
       m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
       m_TensorInfo(tensorInfo)
     {
-        BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
+        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
     }
 
     void CreateLayerDeferred() override
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 2c25eec163..57f823fe13 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -27,5 +27,24 @@ armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
     }
 }
 
+armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
+                                unsigned int numberOfChannels,
+                                unsigned int height,
+                                unsigned int width,
+                                const armnn::DataLayout dataLayout,
+                                const armnn::DataType dataType)
+{
+    switch (dataLayout)
+    {
+        case armnn::DataLayout::NCHW:
+            return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
+        case armnn::DataLayout::NHWC:
+            return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
+        default:
+            throw armnn::InvalidArgumentException("Unknown data layout ["
+                                                  + std::to_string(static_cast<int>(dataLayout))
+                                                  + "]", CHECK_LOCATION());
+    }
 }
+}
diff --git a/src/armnnUtils/TensorUtils.hpp b/src/armnnUtils/TensorUtils.hpp
index 6461b37f75..fb5e6eb10d 100644
--- a/src/armnnUtils/TensorUtils.hpp
+++ b/src/armnnUtils/TensorUtils.hpp
@@ -15,23 +15,11 @@ armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
                                   unsigned int width,
                                   const armnn::DataLayout dataLayout);
 
-template<typename T>
 armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
                                 unsigned int numberOfChannels,
                                 unsigned int height,
                                 unsigned int width,
-                                const armnn::DataLayout dataLayout)
-{
-    switch (dataLayout)
-    {
-        case armnn::DataLayout::NCHW:
-            return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
-        case armnn::DataLayout::NHWC:
-            return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
-        default:
-            throw armnn::InvalidArgumentException("Unknown data layout ["
-                                                  + std::to_string(static_cast<int>(dataLayout)) +
-                                                  "]", CHECK_LOCATION());
-    }
-}
-} // namespace armnnUtils
\ No newline at end of file
+                                const armnn::DataLayout dataLayout,
+                                const armnn::DataType dataType);
+
+} // namespace armnnUtils
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index b88a0d385b..dd6413f2e7 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "CpuTensorHandleFwd.hpp"
+#include "CompatibleTypes.hpp"
 
 #include 
 
@@ -22,7 +23,7 @@ public:
     template <typename T>
     const T* GetConstTensor() const
     {
-        BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>());
+        BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
         return reinterpret_cast<const T*>(m_Memory);
     }
 
@@ -82,7 +83,7 @@ public:
     template <typename T>
     T* GetTensor() const
     {
-        BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>());
+        BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
         return reinterpret_cast<T*>(m_MutableMemory);
     }
 
diff --git 
a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
index 46c700ce02..ca6130299b 100644
--- a/src/backends/backendsCommon/test/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
@@ -19,7 +19,7 @@
 
 #include 
 
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> BoundedReLuTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -41,11 +41,9 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
     unsigned int outputChannels = inputChannels;
     unsigned int outputBatchSize = inputBatchSize;
 
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::GetDataType<T>());
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
 
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
 
     if(armnn::IsQuantizedType<T>())
     {
@@ -115,7 +113,7 @@ LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
         0.999f, 1.0f, 0.89f, 1.0f,
     };
 
-    return BoundedReLuTestCommon(
+    return BoundedReLuTestCommon<armnn::DataType::Float32>(
         workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
         inputWidth, inputHeight, inputChannels, inputBatchSize);
 }
@@ -146,7 +144,7 @@ LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
         0.999f, 1.2f, 0.89f, 6.0f,
     };
 
-    return BoundedReLuTestCommon(
+    return BoundedReLuTestCommon<armnn::DataType::Float32>(
         workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
         inputWidth, inputHeight, inputChannels, inputBatchSize);
 }
@@ -176,10 +174,10 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
     float outputScale    = 6.0f / 255.0f;
     int32_t outputOffset = 0;
 
-    return BoundedReLuTestCommon<uint8_t>(workloadFactory, memoryManager, 6.0f, 0.0f,
-                                          inputScale, inputOffset, outputScale, outputOffset,
-                                          input, output,
-                                          inputWidth, inputHeight, inputChannels, inputBatchSize);
+    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+        workloadFactory, memoryManager, 6.0f, 0.0f,
+        inputScale, inputOffset, outputScale, outputOffset,
+        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
 }
 
 LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
@@ -205,10 +203,10 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
     int32_t inputOffset = 112;
     float inputScale    = 0.0125f;
 
-    return BoundedReLuTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -1.0f,
-                                          inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
-                                          input, output,
-                                          inputWidth, inputHeight, inputChannels, inputBatchSize);
+    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+        workloadFactory, memoryManager, 1.0f, -1.0f,
+        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same. 
+ input, output, inputWidth, inputHeight, inputChannels, inputBatchSize); } namespace @@ -303,7 +301,7 @@ LayerTestResult CompareBoundedReLuTest( return result; } -template +template> LayerTestResult ConstantLinearActivationTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -320,8 +318,8 @@ LayerTestResult ConstantLinearActivationTestCommon( unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth}; - inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -368,17 +366,18 @@ LayerTestResult ConstantLinearActivationTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return ConstantLinearActivationTestCommon(workloadFactory, memoryManager); + return ConstantLinearActivationTestCommon(workloadFactory, memoryManager); } LayerTestResult ConstantLinearActivationUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return ConstantLinearActivationTestCommon(workloadFactory, memoryManager, 4.0f, 3); + return ConstantLinearActivationTestCommon( + workloadFactory, memoryManager, 4.0f, 3); } -template +template> LayerTestResult SimpleActivationTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -400,10 +399,8 @@ LayerTestResult SimpleActivationTest( constexpr static unsigned int outputChannels = inputChannels; constexpr static unsigned int outputBatchSize = inputBatchSize; - armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, - armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, - armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -448,7 +445,7 @@ LayerTestResult SimpleActivationTest( return result; } -template +template> LayerTestResult SimpleSigmoidTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -470,32 +467,32 @@ LayerTestResult SimpleSigmoidTestCommon( std::vector outputExpectedData(inputData.size()); std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); - return SimpleActivationTest(workloadFactory, - memoryManager, - armnn::ActivationFunction::Sigmoid, - 0.f, - 0.f, - qScale, - qOffset, - inputData, - outputExpectedData); + return SimpleActivationTest(workloadFactory, + memoryManager, + armnn::ActivationFunction::Sigmoid, + 0.f, + 0.f, + qScale, + qOffset, + inputData, + outputExpectedData); } LayerTestResult SimpleSigmoidTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SimpleSigmoidTestCommon(workloadFactory, memoryManager, 0.0f, 0); + return SimpleSigmoidTestCommon(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult SimpleSigmoidUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SimpleSigmoidTestCommon(workloadFactory, memoryManager, 0.1f, 50); + return SimpleSigmoidTestCommon(workloadFactory, memoryManager, 0.1f, 50); } -template +template> LayerTestResult CompareActivationTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -517,8 +514,8 @@ LayerTestResult CompareActivationTestImpl( unsigned int shape[] = {batchSize, channels, height, width}; - inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -596,7 +593,7 @@ LayerTestResult CompareActivationTest( armnn::ActivationFunction f, unsigned int batchSize) { - return CompareActivationTestImpl( + return CompareActivationTestImpl( workloadFactory, memoryManager, refWorkloadFactory, f, batchSize); } @@ -606,6 +603,6 @@ LayerTestResult CompareActivationUint8Test( armnn::IWorkloadFactory& refWorkloadFactory, armnn::ActivationFunction f) { - return CompareActivationTestImpl( + return CompareActivationTestImpl( workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50); } diff --git a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp index f70bf48ca9..1d6cf1d99b 100644 --- a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp +++ b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp @@ -4,6 +4,8 @@ // #pragma once +#include "TypeUtils.hpp" + #include #include @@ -49,7 +51,7 @@ INetworkPtr CreateArithmeticNetwork(const std::vector& inputShapes, return net; } -template +template> void ArithmeticSimpleEndToEnd(const std::vector& backends, const LayerType type, const std::vector expectedOutput) @@ -60,7 +62,7 @@ void ArithmeticSimpleEndToEnd(const std::vector& backends, const TensorShape& outputShape = { 2, 2, 2, 2 }; // Builds up the structure of the network - INetworkPtr net = CreateArithmeticNetwork()>(inputShapes, outputShape, type); + INetworkPtr net = CreateArithmeticNetwork(inputShapes, outputShape, type); BOOST_TEST_CHECKPOINT("create a network"); @@ -76,7 +78,7 @@ void ArithmeticSimpleEndToEnd(const std::vector& backends, EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); } -template +template> void ArithmeticBroadcastEndToEnd(const std::vector& backends, const LayerType type, const std::vector expectedOutput) @@ -87,7 +89,7 @@ void ArithmeticBroadcastEndToEnd(const std::vector& backends, const TensorShape& outputShape = { 1, 2, 2, 3 }; // Builds up the structure of the network - INetworkPtr net = CreateArithmeticNetwork()>(inputShapes, outputShape, type); + INetworkPtr net = CreateArithmeticNetwork(inputShapes, outputShape, type); BOOST_TEST_CHECKPOINT("create a network"); diff --git a/src/backends/backendsCommon/test/BatchNormTestImpl.hpp b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp index d63f0b5610..ded4a067b4 100644 --- a/src/backends/backendsCommon/test/BatchNormTestImpl.hpp +++ b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp @@ -4,6 +4,7 @@ // #pragma once +#include "TypeUtils.hpp" #include "WorkloadTestUtils.hpp" #include @@ -18,7 +19,7 @@ #include -template +template> LayerTestResult BatchNormTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -29,13 +30,13 @@ LayerTestResult BatchNormTestImpl( int32_t qOffset, armnn::DataLayout dataLayout) { - armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType); + armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType); armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout); armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] }, - armnn::GetDataType()); + ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
if (armnn::IsQuantizedType()) @@ -102,7 +103,7 @@ LayerTestResult BatchNormTestImpl( } -template +template> LayerTestResult BatchNormTestNhwcImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -114,9 +115,9 @@ LayerTestResult BatchNormTestNhwcImpl( const unsigned int channels = 2; const unsigned int num = 1; - armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType); + armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType); + armnn::TensorInfo tensorInfo({channels}, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp index 8d292c84bb..24f0825504 100755 --- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp +++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp @@ -6,6 +6,7 @@ #include "WorkloadTestUtils.hpp" #include "TensorUtils.hpp" +#include "TypeUtils.hpp" #include #include @@ -70,7 +71,8 @@ void ApplyBias(std::vector& v, float vScale, int32_t vOffset, } } -template +template, typename B = armnn::ResolveType> LayerTestResult SimpleConvolution2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -115,12 +117,12 @@ LayerTestResult SimpleConvolution2dTestImpl( // Note these tensors will use two (identical) batches. armnn::TensorInfo inputTensorInfo = - armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout); + armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType); armnn::TensorInfo outputTensorInfo = - armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout); + armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType); armnn::TensorInfo kernelDesc = - armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout); - armnn::TensorInfo biasDesc({static_cast(bias.size())}, armnn::GetDataType()); + armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType); + armnn::TensorInfo biasDesc({static_cast(bias.size())}, ArmnnBType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -230,7 +232,8 @@ LayerTestResult SimpleConvolution2dTestImpl( return ret; } -template +template, typename B = armnn::ResolveType> LayerTestResult SimpleConvolution2dNhwcTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -266,11 +269,11 @@ LayerTestResult SimpleConvolution2dNhwcTestImpl( bool biasEnabled = bias.size() > 0; // Creates the tensors. 
- armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType); armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, - armnn::GetDataType()); - armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType()); - armnn::TensorInfo biasDesc({static_cast(bias.size())}, armnn::GetDataType()); + ArmnnType); + armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType); + armnn::TensorInfo biasDesc({static_cast(bias.size())}, ArmnnBType); // Construct the input data. std::vector inputData; @@ -322,7 +325,8 @@ LayerTestResult SimpleConvolution2dNhwcTestImpl( return ret; } -template +template, typename B = armnn::ResolveType> LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -359,11 +363,11 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( // Creates the tensors. armnn::TensorInfo inputTensorInfo = - armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout); + armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType); armnn::TensorInfo outputTensorInfo = - armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout); - armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType()); - armnn::TensorInfo biasDesc({static_cast(bias.size())}, armnn::GetDataType()); + armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType); + armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType); + armnn::TensorInfo biasDesc({static_cast(bias.size())}, ArmnnBType); // Set quantization parameters if the requested type is a quantized type. if (armnn::IsQuantizedType()) @@ -459,7 +463,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestImpl( return ret; } -template +template> LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -468,6 +472,8 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( bool biasEnabled, const armnn::DataLayout layout) { + using B = armnn::ResolveType; + unsigned int inputHeight = 3; unsigned int inputWidth = 3; unsigned int inputChannels = 2; @@ -484,12 +490,12 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( unsigned int outputNum = inputNum; armnn::TensorInfo inputTensorInfo = - armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout); + armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType); armnn::TensorInfo outputTensorInfo = - armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout); + armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType); armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth}, - armnn::GetDataType()); - armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType()); + ArmnnType); + armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -602,7 +608,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( return ret; } -template +template> LayerTestResult DepthwiseConvolution2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -611,6 +617,8 @@ LayerTestResult DepthwiseConvolution2dTestImpl( bool biasEnabled, const armnn::DataLayout layout) { + using B = armnn::ResolveType; + unsigned int depthMultiplier = 2; unsigned int inputHeight = 8; @@ -626,13 +634,13 @@ LayerTestResult DepthwiseConvolution2dTestImpl( unsigned int outputChannels = inputChannels * depthMultiplier; unsigned int outputBatchSize = inputBatchSize; - armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo( - inputBatchSize, inputChannels, inputHeight, inputWidth, layout); - armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo( - outputBatchSize, outputChannels, outputHeight, outputWidth, layout); + armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo( + inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo( + outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType); armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth}, - armnn::GetDataType()); - armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType()); + ArmnnType); + armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -803,7 +811,8 @@ LayerTestResult DepthwiseConvolution2dTestImpl( return ret; } -template +template, typename B = armnn::ResolveType> LayerTestResult DepthwiseConvolution2dNhwcTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -836,11 +845,11 @@ LayerTestResult DepthwiseConvolution2dNhwcTestImpl( unsigned int outputWidth = boost::numeric_cast(outputExpected.shape()[2]); // Creates the tensors. - armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType); armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, - armnn::GetDataType()); - armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType()); - armnn::TensorInfo biasDesc({static_cast(bias.size())}, armnn::GetDataType()); + ArmnnType); + armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType); + armnn::TensorInfo biasDesc({static_cast(bias.size())}, ArmnnBType); // Set quantization parameters if the requested type is a quantized type. if (armnn::IsQuantizedType()) @@ -904,7 +913,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestImpl( return ret; } -template +template> LayerTestResult Convolution1dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -912,8 +921,7 @@ LayerTestResult Convolution1dTestImpl( int32_t qOffset, bool biasEnabled) { - using B = typename FullyConnectedBiasTypeForInputType::Type; - + using B = armnn::ResolveType; // Until we have a specialist 1D convolution layer, we can fake one using // 2D convolution with the final dimension set to 1. 
// I don't anticipate this being particularly slow, given that convolution is implemented @@ -928,10 +936,10 @@ LayerTestResult Convolution1dTestImpl( unsigned int stride = 1; unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride. - armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, armnn::GetDataType()); - armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, armnn::GetDataType()); - armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, armnn::GetDataType()); - armnn::TensorInfo biasInfo({outputChannels}, armnn::GetDataType()); + armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType); + armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType); + armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType); + armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -1023,9 +1031,7 @@ LayerTestResult Convolution1dTestImpl( return ret; } - - -template +template> LayerTestResult CompareConvolution2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1059,10 +1065,10 @@ LayerTestResult CompareConvolution2dTestImpl( unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth}; unsigned int biasShape[] = {outputChannels}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); - kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType()); - biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); + kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType); + biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType); LayerTestResult ret(outputTensorInfo); @@ -1123,7 +1129,7 @@ LayerTestResult CompareConvolution2dTestImpl( return ret; } -template +template> LayerTestResult CompareDepthwiseConvolution2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1178,11 +1184,11 @@ LayerTestResult CompareDepthwiseConvolution2dTestImpl( float outputQScale = armnn::IsQuantizedType() ? 
2.0f : 0; int32_t qOffset = 0; - inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), armnn::GetDataType(), inputsQScale, qOffset); - outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), armnn::GetDataType(), outputQScale, qOffset); - kernelDesc = armnn::TensorInfo(4, kernelShape.data(), armnn::GetDataType(), inputsQScale, qOffset); + inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset); + outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset); + kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset); biasDesc = armnn::TensorInfo( - 1, biasShape.data(), armnn::GetBiasDataType(armnn::GetDataType()), inputsQScale, qOffset); + 1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset); LayerTestResult ret(outputTensorInfo); diff --git a/src/backends/backendsCommon/test/DebugTestImpl.hpp b/src/backends/backendsCommon/test/DebugTestImpl.hpp index d112054198..14808f4856 100644 --- a/src/backends/backendsCommon/test/DebugTestImpl.hpp +++ b/src/backends/backendsCommon/test/DebugTestImpl.hpp @@ -80,7 +80,7 @@ LayerTestResult DebugTestImpl( return ret; } -template +template > LayerTestResult Debug4DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -95,8 +95,8 @@ LayerTestResult Debug4DTest( desc.m_Parameters.m_LayerName = "TestOutput"; desc.m_Parameters.m_SlotIndex = 1; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -131,7 +131,7 @@ LayerTestResult Debug4DTest( expectedStringOutput); } -template +template > LayerTestResult Debug3DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -145,8 +145,8 @@ LayerTestResult Debug3DTest( armnn::DebugQueueDescriptor desc; desc.m_Parameters.m_LayerName = "TestOutput"; - inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType); std::vector input = std::vector( { @@ -179,7 +179,7 @@ LayerTestResult Debug3DTest( expectedStringOutput); } -template +template > LayerTestResult Debug2DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -193,8 +193,8 @@ LayerTestResult Debug2DTest( armnn::DebugQueueDescriptor desc; desc.m_Parameters.m_LayerName = "TestOutput"; - inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType); std::vector input = std::vector( { @@ -225,7 +225,7 @@ LayerTestResult Debug2DTest( expectedStringOutput); } -template +template > LayerTestResult Debug1DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -239,8 +239,8 @@ LayerTestResult Debug1DTest( armnn::DebugQueueDescriptor desc; desc.m_Parameters.m_LayerName = 
"TestOutput"; - inputTensorInfo = armnn::TensorInfo(1, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(1, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType); std::vector input = std::vector( { diff --git a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp index e7c0f01cc9..cfdae63c26 100644 --- a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp +++ b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp @@ -3,6 +3,7 @@ // SPDX-License-Identifier: MIT // +#include "TypeUtils.hpp" #include "WorkloadTestUtils.hpp" #include @@ -220,7 +221,7 @@ LayerTestResult FullyConnectedUint8Test( // Tests the fully connected layer with large values, optionally transposing weights. // Note this is templated for consistency, but the nature of this tests makes it unlikely to be useful in Uint8 mode. // -template +template> LayerTestResult FullyConnectedLargeTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -252,10 +253,10 @@ LayerTestResult FullyConnectedLargeTestCommon( unsigned int biasShape[] = { outputChannels }; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType()); - weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType()); - biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType); + weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType); + biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index 0bf56e2445..3c78c82b6e 100644 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -5,6 +5,7 @@ #include "LayerTests.hpp" #include "WorkloadTestUtils.hpp" #include "TensorUtils.hpp" +#include "TypeUtils.hpp" #include "test/TensorHelpers.hpp" #include "TensorCopyUtils.hpp" @@ -75,12 +76,12 @@ static std::vector ConvInput3x8x16({ static std::vector Bias2({0, 2}); // Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled. -template +template> boost::multi_array GetBias2(bool biasEnabled, float qScale, int32_t qOffset) { if(biasEnabled) { - armnn::TensorInfo biasDesc({static_cast(Bias2.size())}, armnn::GetDataType()); + armnn::TensorInfo biasDesc({static_cast(Bias2.size())}, ArmnnType); boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(qScale, qOffset, Bias2)); return bias; } @@ -90,7 +91,7 @@ boost::multi_array GetBias2(bool biasEnabled, float qScale, int32_t qOffse } } -template +template> LayerTestResult SimpleConvolution2d3x5TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -100,11 +101,11 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( const armnn::DataLayout layout) { // Use common single-batch 3-channel 16x8 image. 
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType()); + armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, qOffset, ConvInput3x8x16)); // Use a 2-element batch with 3-channel 3x5 kernels. - armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType()); + armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( QuantizedVector(qScale, qOffset, { 1, 1, 1, @@ -146,7 +147,7 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( }))); // Expected output is 2 batch elements of a 1-channel 14x4 image. - armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType()); + armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( QuantizedVector(qScale, qOffset, { -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, @@ -162,18 +163,20 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }))); - return SimpleConvolution2dTestImpl(workloadFactory, - memoryManager, - input, - kernel, - GetBias2::Type>(biasEnabled, qScale, qOffset), - expectedOutput, - qScale, - qOffset, - layout); + return SimpleConvolution2dTestImpl( + workloadFactory, + memoryManager, + input, + kernel, + GetBias2(biasEnabled, qScale, qOffset), + expectedOutput, + qScale, + qOffset, + layout); } -template +template> LayerTestResult SimpleConvolution2d3x3TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -185,11 +188,11 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path. // Use common single-batch 3-channel 16x8 image. - armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType()); + armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, qOffset, ConvInput3x8x16)); // Use a 2-element batch of 3-channel 3x3 kernels. - armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType()); + armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( QuantizedVector(qScale, qOffset, { 1, 1, 1, @@ -219,7 +222,7 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( }))); // Expected output is 1 batch of a 2-channel 14x6 image. 
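// The templated helpers in this file now take the tensor data type as a
// non-type template parameter (ArmnnType, with a separate ArmnnBType for bias
// tensors) instead of recovering it from T through the removed
// armnn::GetDataType<T>(). A minimal sketch of the pattern, assuming the
// ResolveType<> alias (presumably supplied by the TypeUtils.hpp include added
// above) maps a DataType tag back to its C++ type, e.g. DataType::Float32 -> float:
//
//     template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
//              typename T = armnn::ResolveType<ArmnnType>>
//     LayerTestResult<T, 4> SomeConv2dTestCommon(/*...*/);  // placeholder name
//
// Callers then select the types explicitly at the call site, pairing Signed32
// biases with QuantisedAsymm8 tensors since 8-bit convolutions accumulate into
// 32-bit values (see the armnn::GetBiasDataType(ArmnnType) call above):
//
//     SomeConv2dTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(/*...*/);
//     SomeConv2dTestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(/*...*/);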
- armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType()); + armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( QuantizedVector(qScale, qOffset, { -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, @@ -237,18 +240,19 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }))); - return SimpleConvolution2dTestImpl(workloadFactory, - memoryManager, - input, - kernel, - GetBias2::Type>(biasEnabled, qScale, qOffset), - expectedOutput, - qScale, - qOffset, - layout); + return SimpleConvolution2dTestImpl( + workloadFactory, + memoryManager, + input, + kernel, + GetBias2(biasEnabled, qScale, qOffset), + expectedOutput, + qScale, + qOffset, + layout); } -template +template> LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -259,7 +263,7 @@ LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( { // Use common single-batch 5x5 image. - armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType()); + armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, { 1, 5, 2, 3, @@ -269,7 +273,7 @@ LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( // Use a 2-element batch of 3-channel 3x3 kernels. - armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType()); + armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, { 4, 5, 6, 0, 0, 0, @@ -277,7 +281,7 @@ LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( }); // Expected output is 1 batch of a 5x5 image. - armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType()); + armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType); const std::vector outputData = { @@ -288,18 +292,19 @@ LayerTestResult SimpleConvolution2d3x3NhwcTestCommon( boost::multi_array expectedOutput = MakeTensor(outputDesc, outputData); - return SimpleConvolution2dNhwcTestImpl(workloadFactory, - memoryManager, - input, - kernel, - boost::multi_array(), - expectedOutput, - dataLayout, - qScale, - qOffset); + return SimpleConvolution2dNhwcTestImpl( + workloadFactory, + memoryManager, + input, + kernel, + boost::multi_array(), + expectedOutput, + dataLayout, + qScale, + qOffset); } -template +template> LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -309,7 +314,7 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( const armnn::DataLayout& dataLayout) { // Input is a single-batch, 1 channel, 5x5 image. - armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::GetDataType()); + armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, { 1, 5, 2, 3, 5, @@ -320,7 +325,7 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( }); // Use a 3x3 kernel. - armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType()); + armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, { 4, 5, 6, @@ -329,7 +334,7 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( }); // Expected output is a single-batch, 1 channel, 3x3 image. 
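// The 3x3 output shape follows from the strided convolution size formula:
//     outputSize = (inputSize + padFront + padBack - kernelSize) / stride + 1
// For the 5x5 input, 3x3 kernel and stride 2 below, e.g. one cell of padding
// on each edge gives (5 + 1 + 1 - 3) / 2 + 1 = 3 in both dimensions.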
- armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::GetDataType()); + armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType); const std::vector outputData = { @@ -347,21 +352,22 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2TestCommon( uint32_t strideX = 2; uint32_t strideY = 2; - return SimpleConvolution2dNhwcTestImpl(workloadFactory, - memoryManager, - input, - kernel, - boost::multi_array(), - expectedOutput, - dataLayout, - qScale, - qOffset, - padLeft, - padTop, - padRight, - padBottom, - strideX, - strideY); + return SimpleConvolution2dNhwcTestImpl( + workloadFactory, + memoryManager, + input, + kernel, + boost::multi_array(), + expectedOutput, + dataLayout, + qScale, + qOffset, + padLeft, + padTop, + padRight, + padBottom, + strideX, + strideY); } LayerTestResult SimpleConvolution2d3x5Test( @@ -370,7 +376,8 @@ LayerTestResult SimpleConvolution2d3x5Test( bool biasEnabled, const armnn::DataLayout layout) { - return SimpleConvolution2d3x5TestCommon(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout); + return SimpleConvolution2d3x5TestCommon( + workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout); } LayerTestResult SimpleConvolution2d3x5Uint8Test( @@ -379,7 +386,8 @@ LayerTestResult SimpleConvolution2d3x5Uint8Test( bool biasEnabled, const armnn::DataLayout layout) { - return SimpleConvolution2d3x5TestCommon(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); + return SimpleConvolution2d3x5TestCommon( + workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); } LayerTestResult SimpleConvolution2d3x3Test( @@ -388,7 +396,8 @@ LayerTestResult SimpleConvolution2d3x3Test( bool biasEnabled, const armnn::DataLayout layout) { - return SimpleConvolution2d3x3TestCommon(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout); + return SimpleConvolution2d3x3TestCommon( + workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout); } LayerTestResult SimpleConvolution2d3x3NhwcTest( @@ -396,12 +405,13 @@ LayerTestResult SimpleConvolution2d3x3NhwcTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled) { - return SimpleConvolution2d3x3NhwcTestCommon(workloadFactory, - memoryManager, - 0.f, - 0, - biasEnabled, - armnn::DataLayout::NHWC); + return SimpleConvolution2d3x3NhwcTestCommon( + workloadFactory, + memoryManager, + 0.f, + 0, + biasEnabled, + armnn::DataLayout::NHWC); } LayerTestResult SimpleConvolution2d3x3Stride2x2Test( @@ -410,12 +420,13 @@ LayerTestResult SimpleConvolution2d3x3Stride2x2Test( bool biasEnabled, const armnn::DataLayout layout) { - return SimpleConvolution2d3x3Stride2x2TestCommon(workloadFactory, - memoryManager, - 0.f, - 0, - biasEnabled, - layout); + return SimpleConvolution2d3x3Stride2x2TestCommon( + workloadFactory, + memoryManager, + 0.f, + 0, + biasEnabled, + layout); } LayerTestResult SimpleConvolution2d3x3Uint8Test( @@ -424,10 +435,12 @@ LayerTestResult SimpleConvolution2d3x3Uint8Test( bool biasEnabled, const armnn::DataLayout layout) { - return SimpleConvolution2d3x3TestCommon(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); + return SimpleConvolution2d3x3TestCommon( + workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); } -template +template> LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -436,7 +449,7 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest int32_t qOffset) { // Use a 
single-batch 1-channel 3x3 image as input. - armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType()); + armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, std::vector( QuantizedVector(qScale, qOffset, { 11,21,31, @@ -445,7 +458,7 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest }))); // Use 1 batch of a 1-channel 2x2 kernel. - armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType()); + armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( QuantizedVector(qScale, qOffset, { -11,-21, @@ -461,7 +474,7 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest //[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..] //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..] //[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..] - armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType()); + armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( QuantizedVector(qScale, qOffset, { 0, 0, 0, 0, 0, 0, @@ -474,22 +487,24 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest 0, 0, 0, 0, 0, 0 }))); - return SimpleConvolution2dTestImpl(workloadFactory, - memoryManager, - input, - kernel, - GetBias2::Type>(false, qScale, qOffset), - expectedOutput, - qScale, - qOffset, - layout, - 1, // Padding left. - 2, // Padding top. - 3, // Padding right. - 4); // Padding bottom. + return SimpleConvolution2dTestImpl( + workloadFactory, + memoryManager, + input, + kernel, + GetBias2(false, qScale, qOffset), + expectedOutput, + qScale, + qOffset, + layout, + 1, // Padding left. + 2, // Padding top. + 3, // Padding right. + 4); // Padding bottom. } -template +template> LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -498,7 +513,7 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( int32_t qOffset) { // Use a single-batch 1-channel 5x5 image as input. - armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType()); + armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, std::vector( QuantizedVector(qScale, qOffset, { 11,21,31,41,51, @@ -509,7 +524,7 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( }))); // Use 1 batch of a 1-channel 4x4 kernel. - armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( QuantizedVector(qScale, qOffset, { -11,-21,-31,-41, @@ -519,7 +534,7 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( }))); // Expected output is 1 batch of a 1-channel 5x5 image. 
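// Both asymmetric-padding tests check out against the stride-1 size formula
//     outputSize = inputSize + padFront + padBack - kernelSize + 1:
// the 3x3 input with a 2x2 kernel and padding (left 1, top 2, right 3,
// bottom 4) yields width 3 + 1 + 3 - 2 + 1 = 6 and height 3 + 2 + 4 - 2 + 1 = 8,
// matching the { 1, 1, 8, 6 } output above, while the 5x5 input with a 4x4
// kernel needs padFront + padBack = 3 in each dimension to keep its 5x5 shape.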
- armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType()); + armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType); std::vector myVec(outputDesc.GetNumElements(), 0); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( QuantizedVector(qScale, qOffset, { @@ -530,11 +545,12 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( -5032, -7256, -9376, -6142, -3368, }))); - return SimpleConvolution2dTestImpl(workloadFactory, + return SimpleConvolution2dTestImpl( + workloadFactory, memoryManager, input, kernel, - GetBias2::Type>(false, qScale, qOffset), + GetBias2(false, qScale, qOffset), expectedOutput, qScale, qOffset, @@ -545,7 +561,8 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( 2); // Padding bottom. } -template +template> LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -555,7 +572,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( const armnn::DataLayout layout) { // Use a single-batch 2-channel 5x5 image as input. - armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { 0, 1, 2, 3, 4, @@ -572,7 +589,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( }))); // Use a depth multiplier of 1 on a 2-channel 4x4 kernel. - armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), { 32, 31, 30, 29, @@ -588,7 +605,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( // Expected output is 1 batch of a 2-channel 5x5 image. // Calculated using the python tensorflow library with strideX=1, strideY=1. 
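// A depthwise convolution filters each input channel independently, so
// outputChannels = inputChannels x depthMultiplier; the { 1, 2, 4, 4 } kernel
// above (multiplier 1, 2 channels, 4x4 window) keeps the 2-channel input at
// 2 channels, and the asymmetric padding again has to total 3 cells per
// dimension for the 4x4 window to preserve the 5x5 spatial size.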
- armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { 1062, 1580, 1850, 1530, 1117, @@ -603,11 +620,12 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 3100, 4352, 4452, 3517, 2465 }))); - return DepthwiseConvolution2dAsymmetricTestImpl(workloadFactory, + return DepthwiseConvolution2dAsymmetricTestImpl( + workloadFactory, memoryManager, input, kernel, - GetBias2::Type>(biasEnabled, qScale, qOffset), + GetBias2(biasEnabled, qScale, qOffset), expectedOutput, qScale, qOffset, @@ -620,7 +638,8 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 1); // strideY } -template +template> LayerTestResult DepthwiseConvolution2dNhwcTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -628,7 +647,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( int32_t qOffset, bool biasEnabled) { - armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { 0, 25, @@ -662,7 +681,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 24, 49 }))); - armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), { 32, 31, 30, 29, @@ -676,7 +695,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 4, 3, 2, 1 }))); - armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { 1062, 1550, @@ -710,11 +729,12 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 2457, 2465 }))); - return DepthwiseConvolution2dNhwcTestImpl(workloadFactory, + return DepthwiseConvolution2dNhwcTestImpl( + workloadFactory, memoryManager, input, kernel, - GetBias2::Type>(biasEnabled, qScale, qOffset), + GetBias2(biasEnabled, qScale, qOffset), expectedOutput, qScale, qOffset, @@ -732,8 +752,9 @@ Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout layout) { - return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon( - workloadFactory, memoryManager, layout, 0.0f, 0); + return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon + ( + workloadFactory, memoryManager, layout, 0.0f, 0); } LayerTestResult Convolution2dAsymmetricPaddingTest( @@ -741,7 +762,7 @@ LayerTestResult Convolution2dAsymmetricPaddingTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout layout) { - return SimpleConvolution2dAsymmetricPaddingTestCommon( + return SimpleConvolution2dAsymmetricPaddingTestCommon( workloadFactory, memoryManager, layout, 0.0f, 0); } @@ -751,7 +772,7 @@ LayerTestResult DepthwiseConvolution2dTest( bool 
biasEnabled, const armnn::DataLayout layout) { - return DepthwiseConvolution2dTestImpl( + return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout); } @@ -760,7 +781,8 @@ LayerTestResult DepthwiseConvolution2dDepthNhwcTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled) { - return DepthwiseConvolution2dNhwcTestCommon(workloadFactory, memoryManager, 0.0f, 0, biasEnabled); + return DepthwiseConvolution2dNhwcTestCommon( + workloadFactory, memoryManager, 0.0f, 0, biasEnabled); } LayerTestResult DepthwiseConvolution2dDepthMul1Test( @@ -769,7 +791,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1Test( bool biasEnabled, const armnn::DataLayout layout) { - return DepthwiseConvolution2dDepthMul1TestImpl( + return DepthwiseConvolution2dDepthMul1TestImpl( workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout); } @@ -779,7 +801,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTest( bool biasEnabled, const armnn::DataLayout layout) { - return DepthwiseConvolution2dAsymmetricTestCommon( + return DepthwiseConvolution2dAsymmetricTestCommon( workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout); } @@ -789,7 +811,7 @@ LayerTestResult DepthwiseConvolution2dUint8Test( bool biasEnabled, const armnn::DataLayout layout) { - return DepthwiseConvolution2dTestImpl( + return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); } @@ -799,7 +821,7 @@ LayerTestResult DepthwiseConvolution2dDepthMul1Uint8Test( bool biasEnabled, const armnn::DataLayout layout) { - return DepthwiseConvolution2dDepthMul1TestImpl( + return DepthwiseConvolution2dDepthMul1TestImpl( workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout); } @@ -808,7 +830,8 @@ LayerTestResult Convolution1dTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled) { - return Convolution1dTestImpl(workloadFactory, memoryManager, 0.0f, 0, biasEnabled); + return Convolution1dTestImpl( + workloadFactory, memoryManager, 0.0f, 0, biasEnabled); } LayerTestResult Convolution1dUint8Test( @@ -816,7 +839,8 @@ LayerTestResult Convolution1dUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool biasEnabled) { - return Convolution1dTestImpl(workloadFactory, memoryManager, 0.1f, 128, biasEnabled); + return Convolution1dTestImpl( + workloadFactory, memoryManager, 0.1f, 128, biasEnabled); } LayerTestResult CompareConvolution2dTest( @@ -824,30 +848,29 @@ LayerTestResult CompareConvolution2dTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory) { - return CompareConvolution2dTestImpl(workloadFactory, memoryManager, refWorkloadFactory); + return CompareConvolution2dTestImpl( + workloadFactory, memoryManager, refWorkloadFactory); } -template -LayerTestResult CompareDepthwiseConvolution2dTest( +LayerTestResult CompareDepthwiseConvolution2dFloatTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, armnn::IWorkloadFactory& refWorkloadFactory, const armnn::DataLayout layout) { - return CompareDepthwiseConvolution2dTestImpl(workloadFactory, memoryManager, refWorkloadFactory, layout); + return CompareDepthwiseConvolution2dTestImpl( + workloadFactory, memoryManager, refWorkloadFactory, layout); } -template LayerTestResult CompareDepthwiseConvolution2dTest( - armnn::IWorkloadFactory&, - const 
armnn::IBackendInternal::IMemoryManagerSharedPtr&, - armnn::IWorkloadFactory&, - const armnn::DataLayout); - -template LayerTestResult CompareDepthwiseConvolution2dTest( - armnn::IWorkloadFactory&, - const armnn::IBackendInternal::IMemoryManagerSharedPtr&, - armnn::IWorkloadFactory&, - const armnn::DataLayout); +LayerTestResult CompareDepthwiseConvolution2dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::IWorkloadFactory& refWorkloadFactory, + const armnn::DataLayout layout) +{ + return CompareDepthwiseConvolution2dTestImpl( + workloadFactory, memoryManager, refWorkloadFactory, layout); +} LayerTestResult SimpleNormalizationAcrossTest( armnn::IWorkloadFactory& workloadFactory, @@ -881,7 +904,7 @@ LayerTestResult SimpleSoftmaxTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float beta) { - return SimpleSoftmaxTestImpl(workloadFactory, memoryManager, beta); + return SimpleSoftmaxTestImpl(workloadFactory, memoryManager, beta); } LayerTestResult SimpleSoftmaxUint8Test( @@ -889,7 +912,7 @@ LayerTestResult SimpleSoftmaxUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float beta) { - return SimpleSoftmaxTestImpl(workloadFactory, memoryManager, beta); + return SimpleSoftmaxTestImpl(workloadFactory, memoryManager, beta); } LayerTestResult CompareNormalizationTest( @@ -908,7 +931,8 @@ LayerTestResult CompareSoftmaxTest( armnn::IWorkloadFactory& refWorkloadFactory, float beta) { - return CompareSoftmaxTestImpl(workloadFactory, memoryManager, refWorkloadFactory, beta); + return CompareSoftmaxTestImpl( + workloadFactory, memoryManager, refWorkloadFactory, beta); } LayerTestResult CompareSoftmaxUint8Test( @@ -917,46 +941,47 @@ LayerTestResult CompareSoftmaxUint8Test( armnn::IWorkloadFactory& refWorkloadFactory, float beta) { - return CompareSoftmaxTestImpl(workloadFactory, memoryManager, refWorkloadFactory, beta); + return CompareSoftmaxTestImpl( + workloadFactory, memoryManager, refWorkloadFactory, beta); } std::vector> SplitterTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SplitterTestCommon(workloadFactory, memoryManager); + return SplitterTestCommon(workloadFactory, memoryManager); } std::vector> SplitterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SplitterTestCommon(workloadFactory, memoryManager, 1.0f, 0); + return SplitterTestCommon(workloadFactory, memoryManager, 1.0f, 0); } LayerTestResult CopyViaSplitterTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return CopyViaSplitterTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return CopyViaSplitterTestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult CopyViaSplitterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return CopyViaSplitterTestImpl(workloadFactory, memoryManager, 1.0f, 0); + return CopyViaSplitterTestImpl(workloadFactory, memoryManager, 1.0f, 0); } LayerTestResult LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputDesc({ 2, 2 }, 
armnn::DataType::Float32); boost::multi_array input = MakeTensor(inputDesc, std::vector( { 2., 3., 3., 4. })); - armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType()); + armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})); @@ -968,12 +993,12 @@ LayerTestResult LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType()); + armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32); boost::multi_array input = MakeTensor(inputDesc, std::vector( {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})); - armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType()); + armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f, @@ -989,12 +1014,12 @@ LayerTestResult LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType()); + armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32); boost::multi_array input = MakeTensor(inputDesc, std::vector( {2., 3., 3., 4.})); - armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType()); + armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})); @@ -1216,16 +1241,16 @@ LayerTestResult AdditionTest( return ret; } -template +template> LayerTestResult AdditionBroadcastTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType()); - armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType); + armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType); if (armnn::IsQuantizedType()) { @@ -1294,16 +1319,16 @@ LayerTestResult AdditionBroadcastTestImpl( return ret; } -template +template> LayerTestResult AdditionBroadcast1ElementTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType()); - armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType); + armnn::TensorInfo inputTensorInfo2 = 
armnn::TensorInfo({1, 1, 1, 1}, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType); if (armnn::IsQuantizedType()) { @@ -1371,28 +1396,32 @@ LayerTestResult AdditionBroadcastTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return AdditionBroadcastTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return AdditionBroadcastTestImpl( + workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult AdditionBroadcastUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return AdditionBroadcastTestImpl(workloadFactory, memoryManager, 2.f, 0); + return AdditionBroadcastTestImpl( + workloadFactory, memoryManager, 2.f, 0); } LayerTestResult AdditionBroadcast1ElementTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return AdditionBroadcast1ElementTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return AdditionBroadcast1ElementTestImpl( + workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult AdditionBroadcast1ElementUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return AdditionBroadcast1ElementTestImpl(workloadFactory, memoryManager, 0.1333333f, 128); + return AdditionBroadcast1ElementTestImpl( + workloadFactory, memoryManager, 0.1333333f, 128); } LayerTestResult CompareAdditionTest( @@ -1754,24 +1783,24 @@ std::unique_ptr CreateWorkload( } namespace { - template - LayerTestResult ElementwiseTestHelper + template > + LayerTestResult ElementwiseTestHelper (armnn::IWorkloadFactory & workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager, - const unsigned int shape0[4], std::vector values0, - const unsigned int shape1[4], std::vector values1, - const unsigned int outShape[4], std::vector outValues, + const unsigned int shape0[4], std::vector values0, + const unsigned int shape1[4], std::vector values1, + const unsigned int outShape[4], std::vector outValues, float qScale = 0.0f, int qOffset = 0) { const size_t dimensionCount = 4; - armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType()}; - armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType()}; - armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType()}; + armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnType}; + armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnType}; + armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnType}; - auto input0 = MakeTensor(inputTensorInfo0, values0); - auto input1 = MakeTensor(inputTensorInfo1, values1); + auto input0 = MakeTensor(inputTensorInfo0, values0); + auto input1 = MakeTensor(inputTensorInfo1, values1); - if (armnn::IsQuantizedType()) + if (armnn::IsQuantizedType()) { inputTensorInfo0.SetQuantizationScale(qScale); inputTensorInfo0.SetQuantizationOffset(qOffset); @@ -1783,7 +1812,7 @@ namespace { outputTensorInfo.SetQuantizationOffset(qOffset); } - LayerTestResult ret(outputTensorInfo); + LayerTestResult ret(outputTensorInfo); std::unique_ptr inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0); std::unique_ptr inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); @@ -1807,7 +1836,7 @@ namespace { CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); - ret.outputExpected 
= MakeTensor(outputTensorInfo, outValues); + ret.outputExpected = MakeTensor(outputTensorInfo, outValues); return ret; } } @@ -1831,15 +1860,15 @@ LayerTestResult EqualSimpleTest(armnn::IWorkloadFactory& workloadFacto std::vector output({ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape, - input0, - shape, - input1, - shape, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); } LayerTestResult EqualBroadcast1ElementTest( @@ -1854,15 +1883,15 @@ LayerTestResult EqualBroadcast1ElementTest( std::vector output({ 1, 0, 0, 0, 0, 0, 0, 0}); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult EqualBroadcast1DVectorTest( @@ -1880,15 +1909,15 @@ LayerTestResult EqualBroadcast1DVectorTest( std::vector output({ 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult EqualUint8Test( @@ -1907,17 +1936,17 @@ LayerTestResult EqualUint8Test( std::vector output({ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape, - input0, - shape, - input1, - shape, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output, + 1.0f, + 0); } LayerTestResult EqualBroadcast1ElementUint8Test( @@ -1935,17 +1964,17 @@ LayerTestResult EqualBroadcast1ElementUint8Test( std::vector output({ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } LayerTestResult EqualBroadcast1DVectorUint8Test( @@ -1963,17 +1992,17 @@ LayerTestResult EqualBroadcast1DVectorUint8Test( std::vector output({ 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } LayerTestResult GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory, @@ -1995,15 +2024,15 @@ LayerTestResult GreaterSimpleTest(armnn::IWorkloadFactory& workloadFac std::vector output({ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape, - input0, - shape, - input1, - shape, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); } LayerTestResult GreaterBroadcast1ElementTest( @@ -2018,15 +2047,15 @@ LayerTestResult GreaterBroadcast1ElementTest( std::vector output({ 0, 1, 1, 1, 1, 1, 1, 1}); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - 
output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult GreaterBroadcast1DVectorTest( @@ -2044,15 +2073,15 @@ LayerTestResult GreaterBroadcast1DVectorTest( std::vector output({ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult GreaterUint8Test( @@ -2071,17 +2100,17 @@ LayerTestResult GreaterUint8Test( std::vector output({ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape, - input0, - shape, - input1, - shape, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output, + 1.0f, + 0); } LayerTestResult GreaterBroadcast1ElementUint8Test( @@ -2099,17 +2128,17 @@ LayerTestResult GreaterBroadcast1ElementUint8Test( std::vector output({ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } LayerTestResult GreaterBroadcast1DVectorUint8Test( @@ -2127,17 +2156,17 @@ LayerTestResult GreaterBroadcast1DVectorUint8Test( std::vector output({ 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } LayerTestResult MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory, @@ -2159,15 +2188,15 @@ LayerTestResult MaximumSimpleTest(armnn::IWorkloadFactory& workloadFac std::vector output({ 2, 2, 2, 2, 5, 5, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape, - input0, - shape, - input1, - shape, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); } LayerTestResult MaximumBroadcast1ElementTest( @@ -2182,15 +2211,15 @@ LayerTestResult MaximumBroadcast1ElementTest( std::vector output({ 2, 2, 3, 4, 5, 6, 7, 8}); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult MaximumBroadcast1DVectorTest( @@ -2208,15 +2237,15 @@ LayerTestResult MaximumBroadcast1DVectorTest( std::vector output({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult MaximumUint8Test( @@ -2235,17 +2264,17 @@ LayerTestResult MaximumUint8Test( std::vector output({ 2, 2, 2, 2, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5, 5, 5 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, 
- shape, - input0, - shape, - input1, - shape, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output, + 1.0f, + 0); } LayerTestResult MaximumBroadcast1ElementUint8Test( @@ -2263,17 +2292,17 @@ LayerTestResult MaximumBroadcast1ElementUint8Test( std::vector output({ 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } LayerTestResult MaximumBroadcast1DVectorUint8Test( @@ -2291,17 +2320,17 @@ LayerTestResult MaximumBroadcast1DVectorUint8Test( std::vector output({ 1, 10, 3, 4, 10, 6, 7, 10, 9, 10, 11, 12 }); - return ElementwiseTestHelper - (workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } LayerTestResult MinimumBroadcast1ElementTest1( @@ -2316,14 +2345,15 @@ LayerTestResult MinimumBroadcast1ElementTest1( std::vector output({ 1, 2, 2, 2, 2, 2, 2, 2}); - return ElementwiseTestHelper(workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } @@ -2339,14 +2369,15 @@ LayerTestResult MinimumBroadcast1ElementTest2( std::vector output({ 1, 5, 3, 2, 5, 5, 1, 5}); - return ElementwiseTestHelper(workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); } LayerTestResult MinimumBroadcast1DVectorUint8Test( @@ -2364,16 +2395,17 @@ LayerTestResult MinimumBroadcast1DVectorUint8Test( std::vector output({ 1, 2, 3, 1, 2, 1, 1, 1, 2, 1, 2, 3 }); - return ElementwiseTestHelper(workloadFactory, - memoryManager, - shape0, - input0, - shape1, - input1, - shape0, - output, - 1.0f, - 0); + return ElementwiseTestHelper( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output, + 1.0f, + 0); } namespace { @@ -3044,20 +3076,20 @@ void Concatenate( } } -template +template> LayerTestResult Concatenation1dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 3.0f })); auto input1 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 4.0f, 5.0f, 6.0f })); auto input2 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 7.0f, 8.0f, 9.0f })); - armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType); LayerTestResult result(outputTensorInfo); @@ -3083,10 +3115,10 @@ LayerTestResult Concatenation1dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation1dTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return 
Concatenation1dTestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -3095,7 +3127,7 @@ LayerTestResult Concatenation2dTestImpl( const float qScale, const int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 @@ -3137,17 +3169,18 @@ LayerTestResult Concatenation2dTestImpl( return result; } -template +template> LayerTestResult Concatenation2dDim0TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType); + + LayerTestResult result = Concatenation2dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset); - LayerTestResult result = - Concatenation2dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 1.0f, 2.0f, 3.0f, @@ -3175,20 +3208,21 @@ LayerTestResult Concatenation2dDim0Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation2dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation2dDim1TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType); + + LayerTestResult result = Concatenation2dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset); - LayerTestResult result = - Concatenation2dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, @@ -3204,17 +3238,17 @@ LayerTestResult Concatenation2dDim1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation2dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation2dDim0DiffInputDimsTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType()); + armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType); auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 1.0f, 2.0f, 3.0f, @@ -3223,7 +3257,7 @@ LayerTestResult Concatenation2dDim0DiffInputDimsTestImpl( 10.0f, 11.0f, 12.0f, })); - armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType()); + armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType); auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, 
{ // Batch 0 4.0f, 5.0f, 6.0f, @@ -3235,13 +3269,13 @@ LayerTestResult Concatenation2dDim0DiffInputDimsTestImpl( 7.0f, 8.0f, 9.0f, })); - armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType()); + armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType); auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 1 16.0f, 17.0f, 18.0f, })); - armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType); LayerTestResult result(outputTensorInfo); std::vector output; @@ -3282,17 +3316,18 @@ LayerTestResult Concatenation2dDim0DiffInputDimsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim0DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation2dDim0DiffInputDimsTestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation2dDim1DiffInputDimsTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType()); + armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType); auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 1.0f, 2.0f, 3.0f, @@ -3301,7 +3336,7 @@ LayerTestResult Concatenation2dDim1DiffInputDimsTestImpl( 10.0f, 11.0f, 12.0f, })); - armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType()); + armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType); auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, @@ -3310,7 +3345,7 @@ LayerTestResult Concatenation2dDim1DiffInputDimsTestImpl( 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, })); - armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType()); + armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType); auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0 9.0f, @@ -3319,7 +3354,7 @@ LayerTestResult Concatenation2dDim1DiffInputDimsTestImpl( 18.0f })); - armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType); LayerTestResult result(outputTensorInfo); std::vector output; @@ -3348,10 +3383,11 @@ LayerTestResult Concatenation2dDim1DiffInputDimsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim1DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation2dDim1DiffInputDimsTestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation3dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -3361,7 +3397,7 @@ LayerTestResult Concatenation3dTestImpl( float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 @@ -3439,17 +3475,18 @@ LayerTestResult Concatenation3dTestImpl( return result; } -template +template> LayerTestResult Concatenation3dDim0TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, 
int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType); + + LayerTestResult result = Concatenation3dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset); - LayerTestResult result = - Concatenation3dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 1.0f, 2.0f, @@ -3513,20 +3550,20 @@ LayerTestResult Concatenation3dDim0Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation3dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation3dDim1TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType); - LayerTestResult result = - Concatenation3dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset); + LayerTestResult result = Concatenation3dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 @@ -3591,10 +3628,10 @@ LayerTestResult Concatenation3dDim1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation3dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation3dDim2TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -3602,10 +3639,10 @@ LayerTestResult Concatenation3dDim2TestImpl( float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType); - LayerTestResult result = - Concatenation3dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset); + LayerTestResult result = Concatenation3dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 @@ -3635,17 +3672,18 @@ LayerTestResult Concatenation3dDim2Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation3dDim2TestImpl(workloadFactory, memoryManager, useSubtensor, 0.0f, 0); + return Concatenation3dDim2TestImpl( + workloadFactory, memoryManager, useSubtensor, 0.0f, 0); } -template +template> LayerTestResult Concatenation3dDim0DiffInputDimsTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType); auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 1.0f, 
2.0f, @@ -3666,7 +3704,7 @@ LayerTestResult Concatenation3dDim0DiffInputDimsTestImpl( 23.0f, 24.0f })); - armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType); auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 7.0f, 8.0f, @@ -3678,7 +3716,7 @@ LayerTestResult Concatenation3dDim0DiffInputDimsTestImpl( 11.0f, 12.0f, })); - armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType); auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 25.0f, 26.0f, @@ -3708,7 +3746,7 @@ LayerTestResult Concatenation3dDim0DiffInputDimsTestImpl( 35.0f, 36.0f })); - armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); std::vector output; @@ -3785,17 +3823,18 @@ LayerTestResult Concatenation3dDim0DiffInputDimsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim0DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation3dDim0DiffInputDimsTestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation3dDim1DiffInputDimsTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType); auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 1.0f, 2.0f, @@ -3816,7 +3855,7 @@ LayerTestResult Concatenation3dDim1DiffInputDimsTestImpl( 23.0f, 24.0f })); - armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType()); + armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType); auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 7.0f, 8.0f, @@ -3843,7 +3882,7 @@ LayerTestResult Concatenation3dDim1DiffInputDimsTestImpl( 15.0f, 16.0f, })); - armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType()); + armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType); auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 17.0f, 18.0f, @@ -3852,7 +3891,7 @@ LayerTestResult Concatenation3dDim1DiffInputDimsTestImpl( 31.0f, 32.0f, })); - armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); std::vector output; @@ -3923,10 +3962,11 @@ LayerTestResult Concatenation3dDim1DiffInputDimsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim1DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation3dDim1DiffInputDimsTestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation3dDim2DiffInputDimsTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -3934,7 +3974,7 @@ LayerTestResult Concatenation3dDim2DiffInputDimsTestImpl( float qScale, int32_t qOffset) { - armnn::TensorInfo 
input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType); auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 1.0f, 2.0f, @@ -3955,7 +3995,7 @@ LayerTestResult Concatenation3dDim2DiffInputDimsTestImpl( 23.0f, 24.0f })); - armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType()); + armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType); auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 7.0f, @@ -3976,7 +4016,7 @@ LayerTestResult Concatenation3dDim2DiffInputDimsTestImpl( 29.0f })); - armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType()); + armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType); auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { // Batch 0, Channel 0 13.0f, 14.0f, 50.0f, @@ -3997,7 +4037,7 @@ LayerTestResult Concatenation3dDim2DiffInputDimsTestImpl( 35.0f, 36.0f, 55.0f, })); - armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType); LayerTestResult result(outputTensorInfo); std::vector output; @@ -4039,10 +4079,11 @@ LayerTestResult Concatenation3dDim2DiffInputDimsTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation3dDim2DiffInputDimsTestImpl(workloadFactory, memoryManager, useSubtensor, 0.0f, 0); + return Concatenation3dDim2DiffInputDimsTestImpl( + workloadFactory, memoryManager, useSubtensor, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -4052,7 +4093,7 @@ LayerTestResult Concatenation4dTestImpl( float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, @@ -4099,17 +4140,18 @@ LayerTestResult Concatenation4dTestImpl( return result; } -template +template> LayerTestResult Concatenation4dDim0TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType); + + LayerTestResult result = Concatenation4dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset); - LayerTestResult result = Concatenation4dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 0, - true, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 3.0f, 4.0f, @@ -4139,20 +4181,21 @@ LayerTestResult Concatenation4dDim0Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation4dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dDim1TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType()); + 
armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType); + + LayerTestResult result = Concatenation4dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset); - LayerTestResult result = Concatenation4dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 1, - true, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 3.0f, 4.0f, @@ -4183,20 +4226,21 @@ LayerTestResult Concatenation4dDim1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation4dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dDim2TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType); + + LayerTestResult result = Concatenation4dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset); - LayerTestResult result = Concatenation4dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 2, - true, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 3.0f, 4.0f, @@ -4227,10 +4271,10 @@ LayerTestResult Concatenation4dDim2Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation4dDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dDim3TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -4238,10 +4282,11 @@ LayerTestResult Concatenation4dDim3TestImpl( int32_t qOffset, bool useSubtensor) { - armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType); + + LayerTestResult result = Concatenation4dTestImpl( + workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset); - LayerTestResult result = Concatenation4dTestImpl(workloadFactory, memoryManager, outputTensorInfo, 3, - useSubtensor, qScale, qOffset); result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 11.0f, 12.0f, @@ -4273,10 +4318,11 @@ LayerTestResult Concatenation4dDim3Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation4dDim3TestImpl(workloadFactory, memoryManager, 0.0f, 0, useSubtensor); + return Concatenation4dDim3TestImpl( + workloadFactory, memoryManager, 0.0f, 0, useSubtensor); } -template +template> LayerTestResult Concatenation4dDiffShapeDim0TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -4284,7 +4330,7 @@ LayerTestResult Concatenation4dDiffShapeDim0TestImpl( int32_t qOffset) { unsigned int dimension = 0; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { 1.0f, 
2.0f, @@ -4295,7 +4341,7 @@ LayerTestResult Concatenation4dDiffShapeDim0TestImpl( 11.0f, 12.0f })); - armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType); auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { 11.0f, 12.0f, @@ -4314,7 +4360,7 @@ LayerTestResult Concatenation4dDiffShapeDim0TestImpl( })); - armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); @@ -4360,10 +4406,11 @@ LayerTestResult Concatenation4dDiffShapeDim0Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDiffShapeDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation4dDiffShapeDim0TestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dDiffShapeDim1TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -4371,7 +4418,7 @@ LayerTestResult Concatenation4dDiffShapeDim1TestImpl( int32_t qOffset) { unsigned int dimension = 1; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, @@ -4382,7 +4429,7 @@ LayerTestResult Concatenation4dDiffShapeDim1TestImpl( 11.0f, 12.0f })); - armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType); auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { 11.0f, 12.0f, @@ -4392,7 +4439,7 @@ LayerTestResult Concatenation4dDiffShapeDim1TestImpl( })); - armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); @@ -4428,10 +4475,11 @@ LayerTestResult Concatenation4dDiffShapeDim1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDiffShapeDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation4dDiffShapeDim1TestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dDiffShapeDim2TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -4439,7 +4487,7 @@ LayerTestResult Concatenation4dDiffShapeDim2TestImpl( int32_t qOffset) { unsigned int dimension = 2; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, @@ -4450,7 +4498,7 @@ LayerTestResult Concatenation4dDiffShapeDim2TestImpl( 11.0f, 12.0f })); - armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType); auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { 11.0f, 12.0f, @@ -4464,7 +4512,7 @@ LayerTestResult Concatenation4dDiffShapeDim2TestImpl( 27.0f, 28.0f })); - armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 3, 
5, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); @@ -4507,10 +4555,11 @@ LayerTestResult Concatenation4dDiffShapeDim2Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDiffShapeDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concatenation4dDiffShapeDim2TestImpl( + workloadFactory, memoryManager, 0.0f, 0); } -template +template> LayerTestResult Concatenation4dDiffShapeDim3TestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -4519,7 +4568,7 @@ LayerTestResult Concatenation4dDiffShapeDim3TestImpl( bool useSubtensor) { unsigned int dimension = 3; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType); auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, @@ -4530,7 +4579,7 @@ LayerTestResult Concatenation4dDiffShapeDim3TestImpl( 11.0f, 12.0f })); - armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType); auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { 11.0f, 12.0f, 13.0f, @@ -4543,7 +4592,7 @@ LayerTestResult Concatenation4dDiffShapeDim3TestImpl( 26.0f, 27.0f, 28.0f })); - armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType); LayerTestResult result(outputTensorInfo); @@ -4576,7 +4625,8 @@ LayerTestResult Concatenation4dDiffShapeDim3Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation4dDiffShapeDim3TestImpl(workloadFactory, memoryManager, 0.0f, 0, useSubtensor); + return Concatenation4dDiffShapeDim3TestImpl( + workloadFactory, memoryManager, 0.0f, 0, useSubtensor); } LayerTestResult ResizeBilinearNopTest( @@ -4584,8 +4634,11 @@ LayerTestResult ResizeBilinearNopTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout); - const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout); + const armnn::TensorInfo inputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32); + + const armnn::TensorInfo outputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32); std::vector inputData({ 1.0f, 2.0f, 3.0f, 4.0f, @@ -4638,8 +4691,11 @@ LayerTestResult SimpleResizeBilinearTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout); - const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout); + const armnn::TensorInfo inputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32); + + const armnn::TensorInfo outputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32); std::vector inputData({ 1.0f, 255.0f, @@ -4704,8 +4760,11 @@ LayerTestResult ResizeBilinearSqMinTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - const armnn::TensorInfo inputTensorInfo = 
armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout); - const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout); + const armnn::TensorInfo inputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32); + + const armnn::TensorInfo outputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32); std::vector inputData({ 1.0f, 2.0f, 3.0f, 4.0f, @@ -4770,8 +4829,11 @@ LayerTestResult ResizeBilinearMinTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout); - const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout); + const armnn::TensorInfo inputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32); + + const armnn::TensorInfo outputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32); std::vector inputData({ 1.0f, 2.0f, 3.0f, 5.0f, 8.0f, @@ -4834,8 +4896,11 @@ LayerTestResult ResizeBilinearMagTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout); - const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout); + const armnn::TensorInfo inputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32); + + const armnn::TensorInfo outputTensorInfo = + armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32); std::vector inputData({ 1.0f, 2.0f, @@ -5021,7 +5086,7 @@ float CalcInvL2Norm(std::initializer_list elements) } // anonymous namespace -template +template> LayerTestResult Pad2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -5031,8 +5096,8 @@ LayerTestResult Pad2dTestCommon( const armnn::TensorShape inputShape{ 3, 3 }; const armnn::TensorShape outputShape{ 7, 7 }; - const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType()); - const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType()); + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); std::vector inputValues( QuantizedVector(qScale, qOffset, @@ -5089,7 +5154,7 @@ LayerTestResult Pad2dTestCommon( return result; } -template +template> LayerTestResult Pad3dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -5099,8 +5164,8 @@ LayerTestResult Pad3dTestCommon( const armnn::TensorShape inputShape{ 2, 2, 2 }; const armnn::TensorShape outputShape{ 3, 5, 6 }; - const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType()); - const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType()); + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); std::vector inputValues( QuantizedVector(qScale,qOffset, @@ -5173,7 +5238,7 @@ LayerTestResult Pad3dTestCommon( return result; } -template +template> LayerTestResult Pad4dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -5183,8 +5248,8 @@ LayerTestResult Pad4dTestCommon( const 
armnn::TensorShape inputShape{ 2, 2, 3, 2 }; const armnn::TensorShape outputShape{ 4, 5, 7, 4 }; - const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>()); - const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>()); + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); std::vector<T> inputValues( QuantizedVector<T>(qScale,qOffset, @@ -5414,42 +5479,42 @@ LayerTestResult<uint8_t, 2> PadUint82dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0); + return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0); } LayerTestResult<uint8_t, 3> PadUint83dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0); + return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0); } LayerTestResult<uint8_t, 4> PadUint84dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0); + return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0); } LayerTestResult<float, 2> PadFloat322dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0); + return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult<float, 3> PadFloat323dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0); + return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult<float, 4> PadFloat324dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0); + return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult<float, 4> L2Normalization1dTest( @@ -5777,7 +5842,7 @@ LayerTestResult<float, 4> L2Normalization4dTest( inputValues, expectedOutputValues, layout); } -template<typename T> +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> LayerTestResult<T, 4> ConstantTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { @@ -5794,11 +5859,9 @@ LayerTestResult<T, 4> ConstantTestImpl( constexpr unsigned int outputChannels = inputChannels; constexpr unsigned int outputBatchSize = inputBatchSize; - armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, - armnn::GetDataType<T>()); + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType); - armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, - armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType<T>())
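The trailing context above is the guard these templated helpers rely on for dual float/quantized coverage. A minimal self-contained sketch of the idiom, with a hypothetical helper name (qScale/qOffset as in the surrounding tests):

    #include <cstdint>
    #include <armnn/Tensor.hpp>
    #include <armnn/TypesUtils.hpp>

    // Attach scale/offset only when the element type is a quantized one;
    // float tensors keep their default quantization parameters.
    template<typename T>
    void SetQuantizationIfNeeded(armnn::TensorInfo& info, float qScale, int32_t qOffset)
    {
        if (armnn::IsQuantizedType<T>())
        {
            info.SetQuantizationScale(qScale);
            info.SetQuantizationOffset(qOffset);
        }
    }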
@@ -5876,14 +5939,14 @@ LayerTestResult<float, 4> ConstantTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0); + return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult<uint8_t, 4> ConstantTestUint8( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0); + return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0); } LayerTestResult<uint8_t, 3> MergerUint8Test( @@ -6934,9 +6997,10 @@ LayerTestResult<float, 4> BatchNormTest( 2.f, 4.f }; - return BatchNormTestImpl<float>(workloadFactory, memoryManager, - inputOutputShape, inputValues, expectedOutputValues, - 0.f, 0, armnn::DataLayout::NCHW); + return BatchNormTestImpl<armnn::DataType::Float32>( + workloadFactory, memoryManager, + inputOutputShape, inputValues, expectedOutputValues, + 0.f, 0, armnn::DataLayout::NCHW); } LayerTestResult<float, 4> BatchNormNhwcTest( @@ -6978,9 +7042,10 @@ LayerTestResult<float, 4> BatchNormNhwcTest( 6.f, 4.f }; - return BatchNormTestImpl<float>(workloadFactory, memoryManager, - inputOutputShape, inputValues, expectedOutputValues, - 0.f, 0, armnn::DataLayout::NHWC); + return BatchNormTestImpl<armnn::DataType::Float32>( + workloadFactory, memoryManager, + inputOutputShape, inputValues, expectedOutputValues, + 0.f, 0, armnn::DataLayout::NHWC); } LayerTestResult<uint8_t, 4> BatchNormUint8Test( @@ -7018,9 +7083,10 @@ LayerTestResult<uint8_t, 4> BatchNormUint8Test( 2.f, 4.f }; - return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager, - inputOutputShape, inputValues, expectedOutputValues, - 1.f/20.f, 50, armnn::DataLayout::NCHW); + return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, + inputOutputShape, inputValues, expectedOutputValues, + 1.f/20.f, 50, armnn::DataLayout::NCHW); } LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest( @@ -7062,65 +7128,68 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest( 6.f, 4.f }; - return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager, - inputOutputShape, inputValues, expectedOutputValues, - 1.f/20.f, 50, armnn::DataLayout::NHWC); + return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8> + (workloadFactory, memoryManager, + inputOutputShape, inputValues, expectedOutputValues, + 1.f/20.f, 50, armnn::DataLayout::NHWC); } LayerTestResult<uint8_t, 4> ConstantUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1); + return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1); } LayerTestResult<uint8_t, 1> Concatenation1dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); }
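A compact sketch of the pattern these rewritten wrappers follow: the armnn::DataType enum becomes a non-type template parameter and the C++ element type is derived from it, not the other way around. The local MiniResolve trait below stands in for the enum-to-type mapping the new signatures are assumed to use (armnn::ResolveType); only the mechanism is illustrated.

    #include <cstdint>
    #include <armnn/Types.hpp>

    template<armnn::DataType DT> struct MiniResolve;
    template<> struct MiniResolve<armnn::DataType::Float32>         { using Type = float;   };
    template<> struct MiniResolve<armnn::DataType::QuantisedAsymm8> { using Type = uint8_t; };
    template<> struct MiniResolve<armnn::DataType::Signed32>        { using Type = int32_t; };

    // The enum stays available as a compile-time constant (usable directly for a
    // TensorInfo), while T is defaulted from it, so call sites name only the data type.
    template<armnn::DataType ArmnnType, typename T = typename MiniResolve<ArmnnType>::Type>
    T ZeroOf()
    {
        return static_cast<T>(0);
    }

    // Usage mirrors the rewritten call sites:
    //   float   f = ZeroOf<armnn::DataType::Float32>();
    //   uint8_t q = ZeroOf<armnn::DataType::QuantisedAsymm8>();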
LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test( @@ -7128,21 +7197,23 @@ LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1); + return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, useSubtensor, 0.5f, -1); } LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test( @@ -7150,56 +7221,61 @@ LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1); + return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, useSubtensor, 0.5f, -1); } LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); }
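Why the Uint8 wrappers above pass (0.5f, -1): the shared implementations author their data as float and quantize each value with the usual asymmetric mapping. A stand-alone sketch of that arithmetic (the tests use the QuantizedVector helper for this; clamping to the uint8 range is omitted here):

    #include <cmath>
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> QuantizeForTest(const std::vector<float>& values, float qScale, int32_t qOffset)
    {
        std::vector<uint8_t> quantized;
        quantized.reserve(values.size());
        for (float v : values)
        {
            // q = round(v / scale) + offset; with (0.5f, -1) the value 1.0f maps to 1.
            quantized.push_back(static_cast<uint8_t>(std::lround(v / qScale) + qOffset));
        }
        return quantized;
    }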
LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor); + return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1, useSubtensor); } LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1); + return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test( @@ -7207,7 +7283,8 @@ LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor); + return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>( + workloadFactory, memoryManager, 0.5f, -1, useSubtensor); } LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test( @@ -7215,7 +7292,8 @@ LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool forceNoPadding) { - return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding); + return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>( + workloadFactory, memoryManager, forceNoPadding); } LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test( @@ -7223,7 +7301,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool forceNoPadding) { - return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>( + return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>( workloadFactory, memoryManager, forceNoPadding, 3.0f, -5); } @@ -7232,7 +7310,8 @@ LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool forceNoPadding) { - return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding); + return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>( + workloadFactory, memoryManager, forceNoPadding); } LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test( @@ -7240,7 +7319,7 @@ LayerTestResult<uint8_t, 4>
SimpleMaxPooling2dSize3x3Stride2x4Uint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool forceNoPadding) { - return SimpleMaxPooling2dSize3x3Stride2x4TestCommon( + return SimpleMaxPooling2dSize3x3Stride2x4TestCommon( workloadFactory, memoryManager, forceNoPadding, 0.1f, 128); } @@ -7249,7 +7328,7 @@ LayerTestResult SimpleMaxPooling2dTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - return SimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, dataLayout); + return SimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } LayerTestResult SimpleMaxPooling2dUint8Test( @@ -7257,7 +7336,7 @@ LayerTestResult SimpleMaxPooling2dUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - return SimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, dataLayout); + return SimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } LayerTestResult SimpleAveragePooling2dTest( @@ -7265,7 +7344,7 @@ LayerTestResult SimpleAveragePooling2dTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - return SimpleAveragePooling2dTestCommon(workloadFactory, memoryManager, dataLayout); + return SimpleAveragePooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } LayerTestResult SimpleAveragePooling2dUint8Test( @@ -7273,7 +7352,7 @@ LayerTestResult SimpleAveragePooling2dUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - return SimpleAveragePooling2dTestCommon( + return SimpleAveragePooling2dTestCommon( workloadFactory, memoryManager, dataLayout, 0.5, -1); } @@ -7282,7 +7361,7 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool forceNoPadding) { - return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( + return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( workloadFactory, memoryManager, forceNoPadding); } @@ -7290,14 +7369,15 @@ LayerTestResult LargeTensorsAveragePooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return LargeTensorsAveragePooling2dTestCommon(workloadFactory, memoryManager); + return LargeTensorsAveragePooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult LargeTensorsAveragePooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return LargeTensorsAveragePooling2dTestCommon(workloadFactory, memoryManager, 0.5, -1); + return LargeTensorsAveragePooling2dTestCommon( + workloadFactory, memoryManager, 0.5, -1); } LayerTestResult SimpleL2Pooling2dTest( @@ -7305,7 +7385,7 @@ LayerTestResult SimpleL2Pooling2dTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - return SimpleL2Pooling2dTestCommon(workloadFactory, memoryManager, dataLayout); + return SimpleL2Pooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } LayerTestResult SimpleL2Pooling2dUint8Test( @@ -7313,91 +7393,91 @@ LayerTestResult SimpleL2Pooling2dUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout) { - return SimpleL2Pooling2dTestCommon(workloadFactory, memoryManager, 
dataLayout); + return SimpleL2Pooling2dTestCommon(workloadFactory, memoryManager, dataLayout); } LayerTestResult L2Pooling2dSize3Stride1Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize3Stride1TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize3Stride1TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize3Stride1Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize3Stride1TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize3Stride1TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize3Stride3Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize3Stride3TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize3Stride3TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize3Stride3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize3Stride3TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize3Stride3TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize3Stride4Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize3Stride4TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize3Stride4TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize3Stride4Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize3Stride4TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize3Stride4TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize7Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize7TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize7TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize7Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize7TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize7TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize9Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize9TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize9TestCommon(workloadFactory, memoryManager); } LayerTestResult L2Pooling2dSize9Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return L2Pooling2dSize9TestCommon(workloadFactory, memoryManager); + return L2Pooling2dSize9TestCommon(workloadFactory, memoryManager); } LayerTestResult AsymmetricNonSquarePooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return AsymmetricNonSquarePooling2dTestCommon(workloadFactory, memoryManager); + return AsymmetricNonSquarePooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult 
AsymmetricNonSquarePooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return AsymmetricNonSquarePooling2dTestCommon(workloadFactory, memoryManager); + return AsymmetricNonSquarePooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult ComparePooling2dTest( @@ -7406,7 +7486,7 @@ LayerTestResult ComparePooling2dTest( armnn::IWorkloadFactory& refWorkloadFactory, armnn::PoolingAlgorithm poolingType) { - return ComparePooling2dTestCommon( + return ComparePooling2dTestCommon( workloadFactory, memoryManager, refWorkloadFactory, poolingType); } @@ -7416,7 +7496,7 @@ LayerTestResult ComparePooling2dUint8Test( armnn::IWorkloadFactory& refWorkloadFactory, armnn::PoolingAlgorithm poolingType) { - return ComparePooling2dTestCommon( + return ComparePooling2dTestCommon( workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128); } @@ -7425,105 +7505,111 @@ LayerTestResult FullyConnectedLargeTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool transposeWeights) { - return FullyConnectedLargeTestCommon(workloadFactory, memoryManager, transposeWeights); + return FullyConnectedLargeTestCommon(workloadFactory, memoryManager, transposeWeights); } LayerTestResult IgnorePaddingSimpleMaxPooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleMaxPooling2dTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleMaxPooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingSimpleMaxPooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleMaxPooling2dTestCommon(workloadFactory, memoryManager, 1.0f, -5); + return IgnorePaddingSimpleMaxPooling2dTestCommon( + workloadFactory, memoryManager, 1.0f, -5); } LayerTestResult IgnorePaddingMaxPooling2dSize3Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingMaxPooling2dSize3TestCommon(workloadFactory, memoryManager); + return IgnorePaddingMaxPooling2dSize3TestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingMaxPooling2dSize3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingMaxPooling2dSize3TestCommon(workloadFactory, memoryManager, 1.0f, -5); + return IgnorePaddingMaxPooling2dSize3TestCommon( + workloadFactory, memoryManager, 1.0f, -5); } LayerTestResult IgnorePaddingSimpleAveragePooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleAveragePooling2dTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleAveragePooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingSimpleAveragePooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleAveragePooling2dTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleAveragePooling2dTestCommon( + workloadFactory, memoryManager); } LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingTest( armnn::IWorkloadFactory& workloadFactory, const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( + workloadFactory, memoryManager); } LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( + workloadFactory, memoryManager); } LayerTestResult IgnorePaddingAveragePooling2dSize3Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingAveragePooling2dSize3TestCommon(workloadFactory, memoryManager); + return IgnorePaddingAveragePooling2dSize3TestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingAveragePooling2dSize3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingAveragePooling2dSize3TestCommon(workloadFactory, memoryManager); + return IgnorePaddingAveragePooling2dSize3TestCommon( + workloadFactory, memoryManager); } LayerTestResult IgnorePaddingSimpleL2Pooling2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleL2Pooling2dTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleL2Pooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingSimpleL2Pooling2dUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingSimpleL2Pooling2dTestCommon(workloadFactory, memoryManager); + return IgnorePaddingSimpleL2Pooling2dTestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingL2Pooling2dSize3Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingL2Pooling2dSize3TestCommon(workloadFactory, memoryManager); + return IgnorePaddingL2Pooling2dSize3TestCommon(workloadFactory, memoryManager); } LayerTestResult IgnorePaddingL2Pooling2dSize3Uint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return IgnorePaddingL2Pooling2dSize3TestCommon(workloadFactory, memoryManager); + return IgnorePaddingL2Pooling2dSize3TestCommon(workloadFactory, memoryManager); } LayerTestResult SimplePermuteFloat32Test( @@ -7800,8 +7886,8 @@ LayerTestResult AdditionAfterMaxPoolTest( // 4, 5, 6 // 7, 8, 9 - armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType()); - armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType()); + armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32); + armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32); boost::multi_array poolingInput = MakeTensor(poolingInputTensorInfo, {1, 2, 3, @@ -7846,8 +7932,8 @@ LayerTestResult AdditionAfterMaxPoolTest( // 12, 16 // 24, 28 - armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType()); - armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType()); + armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32); + 
armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32); boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo, {12, 16, @@ -7898,112 +7984,112 @@ LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager); }
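Because the element type is defaulted from the enum, the Float32 and Uint8 wrappers in this block differ only in the template argument; spelling the element type explicitly yields the same instantiation. A sketch (the wrapper name is hypothetical):

    LayerTestResult<uint8_t, 4> ExampleUint8Instantiation(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
    {
        // Defaulted form, as used throughout this file:
        return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
        // Equivalent explicit form:
        //   SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8, uint8_t>(workloadFactory, memoryManager);
    }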
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager); + return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager); + return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } namespace { @@ -8263,126 +8349,126 @@ LayerTestResult<float, 4> StridedSlice4DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice4DTest<float>(workloadFactory, memoryManager); + return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager); + return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager); + return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager); + return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager); + return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 3> StridedSlice3DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice3DTest<float>(workloadFactory, memoryManager); + return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 3>
StridedSlice3DReverseFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager); + return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 2> StridedSlice2DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice2DTest<float>(workloadFactory, memoryManager); + return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager); + return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager); + return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager); + return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager); + return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager); + return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager); + return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager); + return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager); + return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager); + return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager); + return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); }
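The direction of the change in all of these call sites is the same: enum-to-type is a function, but type-to-enum is not, because uint8_t stores more than one tensor data type. A short sketch of the ambiguity that made the old value-type lookup unworkable (the Boolean/QuantisedAsymm8 pairing is this editor's example, not text from the patch):

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    void DistinctTensorTypesSameElementType()
    {
        // Both infos hold uint8_t elements, yet describe different tensor types;
        // only the explicit enum distinguishes them.
        armnn::TensorInfo quantised({ 2, 2 }, armnn::DataType::QuantisedAsymm8);
        armnn::TensorInfo boolean({ 2, 2 }, armnn::DataType::Boolean);
        static_cast<void>(quantised);
        static_cast<void>(boolean);
    }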
memoryManager); } LayerTestResult BatchToSpaceNdNhwcUintTest2( armnn::IWorkloadFactory& workloadFactory, @@ -8517,56 +8603,56 @@ LayerTestResult Debug4DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug4DTest(workloadFactory, memoryManager); + return Debug4DTest(workloadFactory, memoryManager); } LayerTestResult Debug3DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug3DTest(workloadFactory, memoryManager); + return Debug3DTest(workloadFactory, memoryManager); } LayerTestResult Debug2DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug2DTest(workloadFactory, memoryManager); + return Debug2DTest(workloadFactory, memoryManager); } LayerTestResult Debug1DFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug1DTest(workloadFactory, memoryManager); + return Debug1DTest(workloadFactory, memoryManager); } LayerTestResult Debug4DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug4DTest(workloadFactory, memoryManager); + return Debug4DTest(workloadFactory, memoryManager); } LayerTestResult Debug3DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug3DTest(workloadFactory, memoryManager); + return Debug3DTest(workloadFactory, memoryManager); } LayerTestResult Debug2DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug2DTest(workloadFactory, memoryManager); + return Debug2DTest(workloadFactory, memoryManager); } LayerTestResult Debug1DUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Debug1DTest(workloadFactory, memoryManager); + return Debug1DTest(workloadFactory, memoryManager); } LayerTestResult PreCompiledConvolution2dTest( diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 744470db49..7e955653ca 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -121,6 +121,18 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTest( bool biasEnabled, const armnn::DataLayout layout); +LayerTestResult CompareDepthwiseConvolution2dFloatTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::IWorkloadFactory& refWorkloadFactory, + const armnn::DataLayout layout); + +LayerTestResult CompareDepthwiseConvolution2dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + armnn::IWorkloadFactory& refWorkloadFactory, + const armnn::DataLayout layout); + LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/LstmTestImpl.hpp b/src/backends/backendsCommon/test/LstmTestImpl.hpp index 56f40aba84..e300a529ce 100644 --- a/src/backends/backendsCommon/test/LstmTestImpl.hpp +++ 
b/src/backends/backendsCommon/test/LstmTestImpl.hpp @@ -29,15 +29,15 @@ LayerTestResult LstmNoCifgNoPeepholeNoProjectionTestImpl( unsigned numUnits = outputSize; - armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType()); - armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType()); - armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32); + armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32); + armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32); - armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::GetDataType()); - armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType()); - armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType()); + armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::DataType::Float32); + armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::DataType::Float32); + armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32); LayerTestResult ret(outputTensorInfo); @@ -91,9 +91,9 @@ LayerTestResult LstmNoCifgNoPeepholeNoProjectionTestImpl( AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - armnn::TensorInfo tensorInfo4({numUnits}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::GetDataType()); + armnn::TensorInfo tensorInfo4({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::DataType::Float32); auto inputToInputWeights = MakeTensor(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f, -0.34550029f, 0.04266912f, -0.15680569f, @@ -232,15 +232,15 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl unsigned int inputSize = 5; unsigned numUnits = 20; - armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType()); - armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType()); - armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32); + armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32); + armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32); // Scratch buffer size without CIFG [batchSize, numUnits * 4] - armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::GetDataType()); - armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType()); - armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType()); + armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::DataType::Float32); + armnn::TensorInfo cellStateOutTensorInfo({batchSize, 
numUnits}, armnn::DataType::Float32); + armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32); LayerTestResult ret(outputTensorInfo); @@ -292,11 +292,11 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - armnn::TensorInfo tensorInfo16({outputSize}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo20({numUnits}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::GetDataType()); - armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::GetDataType()); + armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32); auto inputToInputWeights = MakeTensor(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f, @@ -950,15 +950,15 @@ LayerTestResult LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( const unsigned int cellSize = outputSize; // Decide the shape of all input tensors - armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType()); - armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::GetDataType()); - armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32); + armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::DataType::Float32); unsigned int scratchBufferSize = cifgEnabled ? 
cellSize * 3 : cellSize * 4;
-    armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::GetDataType<float>());
-    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
-    armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
-    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::DataType::Float32);
+    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+    armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::DataType::Float32);
+    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
 
     // List of inputs
     std::vector<float> inputData;
@@ -974,9 +974,9 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
 
     // Prepare all the weights in the descriptor for LSTM
     armnn::LstmQueueDescriptor data;
-    armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::GetDataType<float>());
-    armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::GetDataType<float>());
-    armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::DataType::Float32);
+    armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::DataType::Float32);
+    armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::DataType::Float32);
 
     auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
             {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
index e0b8233336..2bdfe286c9 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -4,6 +4,8 @@
 //
 #pragma once
 
+#include "TypeUtils.hpp"
+
 #include
 #include
@@ -47,17 +49,18 @@ INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
     return net;
 }
 
-template<typename T>
+template<armnn::DataType ArmnnType>
 void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
+    using T = ResolveType<ArmnnType>;
 
     unsigned int concatAxis = 0;
     const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
     const TensorShape& outputShape = { 4, 3, 2, 2 };
 
     // Builds up the structure of the network
-    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+    INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
     BOOST_TEST_CHECKPOINT("create a network");
 
@@ -110,17 +113,18 @@ void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
     EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
 }
 
-template<typename T>
+template<armnn::DataType ArmnnType>
 void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
+    using T = ResolveType<ArmnnType>;
 
     unsigned int concatAxis = 1;
     const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
     const TensorShape& outputShape = { 2, 6, 2, 2 };
 
     // Builds up the structure of the network
-    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+    INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
     BOOST_TEST_CHECKPOINT("create a network");
 
@@ -173,17 +177,18 @@ void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
     EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
 }
 
-template<typename T>
+template<armnn::DataType ArmnnType>
 void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
 {
     using namespace armnn;
+    using T = ResolveType<ArmnnType>;
 
     unsigned int concatAxis = 2;
     const std::vector<TensorShape>
inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }}; const TensorShape& outputShape = { 2, 3, 4, 2 }; // Builds up the structure of the network - INetworkPtr net = CreateMergerNetwork()>(inputShapes, outputShape, concatAxis); + INetworkPtr net = CreateMergerNetwork(inputShapes, outputShape, concatAxis); BOOST_TEST_CHECKPOINT("create a network"); @@ -236,7 +241,7 @@ void MergerDim2EndToEnd(const std::vector& backends) EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); } -template +template> void MergerDim3EndToEnd(const std::vector& backends) { using namespace armnn; @@ -246,7 +251,7 @@ void MergerDim3EndToEnd(const std::vector& backends) const TensorShape& outputShape = { 2, 3, 2, 4 }; // Builds up the structure of the network - INetworkPtr net = CreateMergerNetwork()>(inputShapes, outputShape, concatAxis); + INetworkPtr net = CreateMergerNetwork(inputShapes, outputShape, concatAxis); BOOST_TEST_CHECKPOINT("create a network"); diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp index b542938585..5edf9c802f 100644 --- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp +++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp @@ -27,7 +27,7 @@ #include #include -template +template> LayerTestResult SimplePooling2dTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -53,10 +53,11 @@ LayerTestResult SimplePooling2dTestImpl( unsigned int outputChannels = boost::numeric_cast(outputExpected.shape()[channelsIndex]); unsigned int outputBatchSize = boost::numeric_cast(outputExpected.shape()[0]); - armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(inputBatchSize, inputChannels, inputHeight, - inputWidth, dataLayout); - armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(outputBatchSize, outputChannels, outputHeight, - outputWidth, dataLayout); + armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo( + inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType); + + armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo( + outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -117,7 +118,7 @@ LayerTestResult SimplePooling2dTestImpl( // channels: 2 // batch size: 2 // -template +template> LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -148,8 +149,8 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( unsigned int channels = 2; unsigned int batchSize = 2; - armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -236,11 +237,11 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( })); } - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult SimpleMaxPooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -255,8 +256,8 @@ LayerTestResult SimpleMaxPooling2dTestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; descriptor.m_DataLayout = dataLayout; - armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout); - armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout); + armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -305,11 +306,11 @@ LayerTestResult SimpleMaxPooling2dTestCommon( auto outputExpected = MakeTensor(outputTensorInfo, outputData); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult SimpleAveragePooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -324,8 +325,8 @@ LayerTestResult SimpleAveragePooling2dTestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; descriptor.m_DataLayout = dataLayout; - armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout); - armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout); + armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -374,11 +375,11 @@ LayerTestResult SimpleAveragePooling2dTestCommon( auto outputExpected = MakeTensor(outputTensorInfo, outputData); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult LargeTensorsAveragePooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -395,8 +396,8 @@ LayerTestResult LargeTensorsAveragePooling2dTestCommon( descriptor.m_PadBottom = 50; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
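Every templated test in this file repeats the guard that follows. As a self-contained sketch of the pattern (the helper is illustrative and not part of the patch; it assumes only the armnn::IsQuantizedType<T>() trait from TypesUtils.hpp and the TensorInfo setters already used in these tests):

#include <cstdint>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

// Illustrative only: applies the test's qScale/qOffset when T is a quantised
// storage type. For float instantiations IsQuantizedType<T>() is false, so
// the TensorInfos keep their default quantization parameters.
template<typename T>
void SetQuantizationIfNeeded(armnn::TensorInfo& inputInfo,
                             armnn::TensorInfo& outputInfo,
                             float qScale, int32_t qOffset)
{
    if (armnn::IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
    }
}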
if(armnn::IsQuantizedType()) @@ -425,11 +426,11 @@ LayerTestResult LargeTensorsAveragePooling2dTestCommon( auto outputExpected = MakeTensor(outputTensorInfo, outputVec); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult SimpleL2Pooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -444,8 +445,8 @@ LayerTestResult SimpleL2Pooling2dTestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; descriptor.m_DataLayout = dataLayout; - armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout); - armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout); + armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType); std::vector inputData( QuantizedVector(qScale, qOffset, { @@ -485,11 +486,11 @@ LayerTestResult SimpleL2Pooling2dTestCommon( auto outputExpected = MakeTensor(outputTensorInfo, outputData); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult L2Pooling2dSize3Stride1TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -502,7 +503,7 @@ LayerTestResult L2Pooling2dSize3Stride1TestCommon( descriptor.m_StrideX = descriptor.m_StrideY = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 2.0f, 1.0f, 5.0f, 2.0f, @@ -511,18 +512,18 @@ LayerTestResult L2Pooling2dSize3Stride1TestCommon( 2.0f, 1.0f, 5.0f, 2.0f, })); - armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 3.0f, 3.0f, 3.0f, 3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult L2Pooling2dSize3Stride3TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -535,7 +536,7 @@ LayerTestResult L2Pooling2dSize3Stride3TestCommon( descriptor.m_StrideX = descriptor.m_StrideY = 3; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, @@ -549,7 +550,7 @@ LayerTestResult L2Pooling2dSize3Stride3TestCommon( 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, })); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 3.0f, 3.0f, 3.0f, @@ -557,11 +558,11 @@ LayerTestResult 
L2Pooling2dSize3Stride3TestCommon( 3.0f, 3.0f, 3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult L2Pooling2dSize3Stride4TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -574,7 +575,7 @@ LayerTestResult L2Pooling2dSize3Stride4TestCommon( descriptor.m_StrideX = descriptor.m_StrideY = 4; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f, @@ -586,18 +587,18 @@ LayerTestResult L2Pooling2dSize3Stride4TestCommon( 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, })); - armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 3.0f, 3.0f, 3.0f, 3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult L2Pooling2dSize7TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -610,7 +611,7 @@ LayerTestResult L2Pooling2dSize7TestCommon( descriptor.m_StrideX = descriptor.m_StrideY = 7; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f, @@ -622,17 +623,17 @@ LayerTestResult L2Pooling2dSize7TestCommon( 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, })); - armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult L2Pooling2dSize9TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -645,7 +646,7 @@ LayerTestResult L2Pooling2dSize9TestCommon( descriptor.m_StrideX = descriptor.m_StrideY = 9; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; - armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, @@ -659,25 +660,25 @@ LayerTestResult L2Pooling2dSize9TestCommon( 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, })); - armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { 3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, 
memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult AsymmetricNonSquarePooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale = 1.0f, int32_t qOffset = 0) { - armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); armnn::Pooling2dDescriptor descriptor; descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; @@ -704,11 +705,11 @@ LayerTestResult AsymmetricNonSquarePooling2dTestCommon( 0.0f, 3.0f, 0.0f, 3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult ComparePooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -737,8 +738,8 @@ LayerTestResult ComparePooling2dTestCommon( unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth }; unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth }; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -820,7 +821,7 @@ LayerTestResult ComparePooling2dTestCommon( // channels: 1 // batch size: 1 // -template +template> LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -870,10 +871,10 @@ LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2TestCommon( 618.0f, 582.0f }; - armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType); // Scale and offset should match input - we're just calculating maximum values. - armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -890,7 +891,7 @@ LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2TestCommon( forceNoPadding ? 
QuantizedVector(qScale, qOffset, expectedOutputDataNoPadding) : QuantizedVector(qScale, qOffset, expectedOutputDataWithPadding)); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } @@ -903,7 +904,7 @@ LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2TestCommon( // channels: 1 // batch size: 1 // -template +template> LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -948,10 +949,10 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( 10.5f, }; - armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType); // Scale and offset should match input - we're just calculating average values. - armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType()); + armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -968,12 +969,12 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( forceNoPadding ? QuantizedVector(qScale, qOffset, expectedOutputDataNoPadding) : QuantizedVector(qScale, qOffset, expectedOutputDataWithPadding)); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingSimpleMaxPooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -990,8 +991,8 @@ LayerTestResult IgnorePaddingSimpleMaxPooling2dTestCommon( descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -1017,11 +1018,11 @@ LayerTestResult IgnorePaddingSimpleMaxPooling2dTestCommon( 1.0f, 2.0f, -4.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingMaxPooling2dSize3TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1038,8 +1039,8 @@ LayerTestResult IgnorePaddingMaxPooling2dSize3TestCommon( descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -1066,11 +1067,11 @@ LayerTestResult IgnorePaddingMaxPooling2dSize3TestCommon( 2.0f, 2.0f, 2.0f, -3.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingSimpleAveragePooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1087,8 +1088,8 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dTestCommon( descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -1114,11 +1115,11 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dTestCommon( 3.0f, 13.0f, 10.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1136,8 +1137,8 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -1162,11 +1163,11 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( 2.0f, 3.5f })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingAveragePooling2dSize3TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1183,8 +1184,8 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3TestCommon( descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -1211,11 +1212,11 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3TestCommon( 9.0f, 11.0f, 12.0f, 7.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingSimpleL2Pooling2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1232,8 +1233,8 @@ LayerTestResult IgnorePaddingSimpleL2Pooling2dTestCommon( descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. if(armnn::IsQuantizedType()) @@ -1259,11 +1260,11 @@ LayerTestResult IgnorePaddingSimpleL2Pooling2dTestCommon( 8.0f, 1.4142f, 4.0f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } -template +template> LayerTestResult IgnorePaddingL2Pooling2dSize3TestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1280,8 +1281,8 @@ LayerTestResult IgnorePaddingL2Pooling2dSize3TestCommon( descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); // Set quantization parameters if the requested type is a quantized type. 
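The quantization parameters set below only matter because every input and expected-output table in these tests is built with QuantizedVector before being wrapped by MakeTensor. The patch never shows that helper; a hedged sketch of its assumed behaviour (named ...Sketch to mark it as a reconstruction, not the QuantizeHelper.hpp original):

#include <cmath>
#include <cstdint>
#include <type_traits>
#include <vector>

// Assumed behaviour: float instantiations pass the literals through
// unchanged; quantised instantiations map each value v to
// round(v / qScale) + qOffset before narrowing to the storage type.
template<typename T>
std::vector<T> QuantizedVectorSketch(float qScale, int32_t qOffset,
                                     const std::vector<float>& values)
{
    std::vector<T> quantized;
    quantized.reserve(values.size());
    for (float v : values)
    {
        if (std::is_floating_point<T>::value)
        {
            quantized.push_back(static_cast<T>(v));
        }
        else
        {
            quantized.push_back(static_cast<T>(std::round(v / qScale) + qOffset));
        }
    }
    return quantized;
}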
if(armnn::IsQuantizedType()) @@ -1308,6 +1309,6 @@ LayerTestResult IgnorePaddingL2Pooling2dSize3TestCommon( 1.0540f, 1.7638f, 2.5385f, 2.3570f, })); - return SimplePooling2dTestImpl( + return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); } diff --git a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp index 97199e3c53..25ceda1128 100644 --- a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp +++ b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp @@ -19,7 +19,7 @@ #include -template +template> LayerTestResult SimpleSoftmaxTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -32,13 +32,13 @@ LayerTestResult SimpleSoftmaxTestImpl( unsigned int inputShape[] = { 2, 4 }; - inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); float qScale = 1.f / 256.f; int qOffset = 0; inputTensorInfo.SetQuantizationScale(qScale); inputTensorInfo.SetQuantizationOffset(qOffset); - outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); + outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); outputTensorInfo.SetQuantizationScale(qScale); outputTensorInfo.SetQuantizationOffset(qOffset); @@ -87,7 +87,7 @@ LayerTestResult SimpleSoftmaxTestImpl( return ret; } -template +template> LayerTestResult CompareSoftmaxTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -103,8 +103,8 @@ LayerTestResult CompareSoftmaxTestImpl( unsigned int inputShape[] = { batchSize, channels }; - inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); float qScale = 1.f / 256.f; int qOffset = 0; inputTensorInfo.SetQuantizationScale(qScale); diff --git a/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp index 814607ddff..756a51cad3 100644 --- a/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp @@ -79,7 +79,7 @@ LayerTestResult SpaceToBatchNdTestImpl( return ret; } -template +template> LayerTestResult SpaceToBatchNdSimpleTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -96,8 +96,8 @@ LayerTestResult SpaceToBatchNdSimpleTest( desc.m_Parameters.m_BlockShape = {2, 2}; desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -113,7 +113,7 @@ LayerTestResult SpaceToBatchNdSimpleTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult SpaceToBatchNdMultiChannelsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -130,8 +130,8 @@ LayerTestResult 
SpaceToBatchNdMultiChannelsTest( desc.m_Parameters.m_BlockShape = {2, 2}; desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -152,7 +152,7 @@ LayerTestResult SpaceToBatchNdMultiChannelsTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult SpaceToBatchNdMultiBlockTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -169,8 +169,8 @@ LayerTestResult SpaceToBatchNdMultiBlockTest( desc.m_Parameters.m_BlockShape = {2, 2}; desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -192,7 +192,7 @@ LayerTestResult SpaceToBatchNdMultiBlockTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult SpaceToBatchNdPaddingTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -209,8 +209,8 @@ LayerTestResult SpaceToBatchNdPaddingTest( desc.m_Parameters.m_BlockShape = {2, 2}; desc.m_Parameters.m_PadList = {{0, 0}, {2, 0}}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -236,34 +236,34 @@ LayerTestResult SpaceToBatchNdPaddingTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult SpaceToBatchNdSimpleNHWCTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdSimpleTest(workloadFactory, memoryManager, armnn::DataLayout::NHWC); + return SpaceToBatchNdSimpleTest(workloadFactory, memoryManager, armnn::DataLayout::NHWC); } -template +template> LayerTestResult SpaceToBatchNdMultiChannelsNHWCTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiChannelsTest(workloadFactory, memoryManager, armnn::DataLayout::NHWC); + return SpaceToBatchNdMultiChannelsTest(workloadFactory, memoryManager, armnn::DataLayout::NHWC); } -template +template> LayerTestResult SpaceToBatchNdMultiBlockNHWCTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return SpaceToBatchNdMultiBlockTest(workloadFactory, memoryManager, armnn::DataLayout::NHWC); + return SpaceToBatchNdMultiBlockTest(workloadFactory, memoryManager, armnn::DataLayout::NHWC); } -template +template> LayerTestResult SpaceToBatchNdPaddingNHWCTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& 
memoryManager)
 {
-    return SpaceToBatchNdPaddingTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+    return SpaceToBatchNdPaddingTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
 }
diff --git a/src/backends/backendsCommon/test/SplitterTestImpl.hpp b/src/backends/backendsCommon/test/SplitterTestImpl.hpp
index e88356ce21..004060f0b8 100644
--- a/src/backends/backendsCommon/test/SplitterTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterTestImpl.hpp
@@ -16,7 +16,7 @@
 
 #include
 
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 std::vector<LayerTestResult<T, 3>> SplitterTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -46,15 +46,15 @@ std::vector<LayerTestResult<T, 3>> SplitterTestCommon(
 
     // Define the tensor descriptors.
-    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType);
 
     // Outputs of the original split.
-    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
-    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
+    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType);
 
     // Outputs of the subsequent subtensor split.
-    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
-    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
+    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
 
     // Set quantization parameters if the requested type is a quantized type.
     // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
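CopyViaSplitterTestImpl below is converted the same way. The ResolveType alias pulled in through TypeUtils.hpp is the inverse of the removed GetDataType<T>() mapping: it turns a DataType enumerator back into the C++ storage type at compile time. Its definition is not part of this patch; a plausible minimal sketch:

#include <cstdint>
#include <armnn/Types.hpp>

// Hypothetical reconstruction of the trait behind ResolveType<ArmnnType>.
template<armnn::DataType DT>
struct ResolveTypeImpl;

template<>
struct ResolveTypeImpl<armnn::DataType::Float32> { using Type = float; };

template<>
struct ResolveTypeImpl<armnn::DataType::QuantisedAsymm8> { using Type = uint8_t; };

template<>
struct ResolveTypeImpl<armnn::DataType::Signed32> { using Type = int32_t; };

template<armnn::DataType DT>
using ResolveType = typename ResolveTypeImpl<DT>::Type;

// With the trait in place a caller names the DataType once and the storage
// type falls out of the default template argument, e.g.
//   SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(factory, memMgr, 1.0f, 0);
// instantiates with T = uint8_t, matching the old SplitterTestCommon<uint8_t>.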
@@ -245,13 +245,13 @@ std::vector> SplitterTestCommon( } -template +template> LayerTestResult CopyViaSplitterTestImpl( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType()); + const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType); auto input = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, diff --git a/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp b/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp index 1633151108..1bf5c642ad 100644 --- a/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp +++ b/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp @@ -4,6 +4,7 @@ // #pragma once +#include "TypeUtils.hpp" #include "WorkloadTestUtils.hpp" #include @@ -71,7 +72,7 @@ LayerTestResult StridedSliceTestImpl( return ret; } -template +template> LayerTestResult StridedSlice4DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -87,8 +88,8 @@ LayerTestResult StridedSlice4DTest( desc.m_Parameters.m_End = {2, 2, 3, 1}; desc.m_Parameters.m_Stride = {1, 1, 1, 1}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -108,7 +109,7 @@ LayerTestResult StridedSlice4DTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSlice4DReverseTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -124,8 +125,8 @@ LayerTestResult StridedSlice4DReverseTest( desc.m_Parameters.m_End = {2, -3, 3, 1}; desc.m_Parameters.m_Stride = {1, -1, 1, 1}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -145,7 +146,7 @@ LayerTestResult StridedSlice4DReverseTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSliceSimpleStrideTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -161,8 +162,8 @@ LayerTestResult StridedSliceSimpleStrideTest( desc.m_Parameters.m_End = {3, 2, 3, 1}; desc.m_Parameters.m_Stride = {2, 2, 2, 1}; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -184,7 +185,7 @@ LayerTestResult StridedSliceSimpleStrideTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSliceSimpleRangeMaskTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) 
@@ -202,8 +203,8 @@ LayerTestResult StridedSliceSimpleRangeMaskTest( desc.m_Parameters.m_BeginMask = (1 << 4) - 1; desc.m_Parameters.m_EndMask = (1 << 4) - 1; - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType); std::vector input = std::vector( { @@ -227,7 +228,7 @@ LayerTestResult StridedSliceSimpleRangeMaskTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSliceShrinkAxisMaskTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -245,8 +246,8 @@ LayerTestResult StridedSliceShrinkAxisMaskTest( desc.m_Parameters.m_EndMask = (1 << 4) - 1; desc.m_Parameters.m_ShrinkAxisMask = (1 << 1) | (1 << 2); - inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType); std::vector input = std::vector( { @@ -266,7 +267,7 @@ LayerTestResult StridedSliceShrinkAxisMaskTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSlice3DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -283,8 +284,8 @@ LayerTestResult StridedSlice3DTest( desc.m_Parameters.m_Stride = {2, 2, 2}; desc.m_Parameters.m_EndMask = (1 << 3) - 1; - inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType); std::vector input = std::vector( { @@ -306,7 +307,7 @@ LayerTestResult StridedSlice3DTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSlice3DReverseTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -322,8 +323,8 @@ LayerTestResult StridedSlice3DReverseTest( desc.m_Parameters.m_End = {-4, -4, -4}; desc.m_Parameters.m_Stride = {-2, -2, -2}; - inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType); std::vector input = std::vector( { @@ -345,7 +346,7 @@ LayerTestResult StridedSlice3DReverseTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSlice2DTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -362,8 +363,8 @@ LayerTestResult StridedSlice2DTest( desc.m_Parameters.m_Stride = {2, 2}; desc.m_Parameters.m_EndMask = (1 << 2) - 1; - inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType()); + 
inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType); std::vector input = std::vector( { @@ -385,7 +386,7 @@ LayerTestResult StridedSlice2DTest( workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc); } -template +template> LayerTestResult StridedSlice2DReverseTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -403,8 +404,8 @@ LayerTestResult StridedSlice2DReverseTest( desc.m_Parameters.m_BeginMask = (1 << 2) - 1; desc.m_Parameters.m_EndMask = (1 << 2) - 1; - inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType()); - outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType()); + inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType); + outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType); std::vector input = std::vector( { diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp index bf299dc0b5..ba94353049 100644 --- a/src/backends/cl/test/ClEndToEndTests.cpp +++ b/src/backends/cl/test/ClEndToEndTests.cpp @@ -19,32 +19,32 @@ BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32) BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test) { - MergerDim0EndToEnd(defaultBackends); + MergerDim0EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test) { - MergerDim0EndToEnd(defaultBackends); + MergerDim0EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test) { - MergerDim1EndToEnd(defaultBackends); + MergerDim1EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test) { - MergerDim1EndToEnd(defaultBackends); + MergerDim1EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test) { - MergerDim3EndToEnd(defaultBackends); + MergerDim3EndToEnd(defaultBackends); } BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test) { - MergerDim3EndToEnd(defaultBackends); + MergerDim3EndToEnd(defaultBackends); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index 96f2f1fe77..2fe03abb3c 100755 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -414,17 +414,17 @@ ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseUint8, StridedSlice2DReverseUint8Test) ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, - CompareDepthwiseConvolution2dTest, + CompareDepthwiseConvolution2dFloatTest, armnn::DataLayout::NCHW) ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, - CompareDepthwiseConvolution2dTest, + CompareDepthwiseConvolution2dUint8Test, armnn::DataLayout::NCHW) ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc, - CompareDepthwiseConvolution2dTest, + CompareDepthwiseConvolution2dFloatTest, armnn::DataLayout::NHWC) ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc, - CompareDepthwiseConvolution2dTest, + CompareDepthwiseConvolution2dUint8Test, armnn::DataLayout::NHWC) ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp index 6f44cc4772..1eeb9ed98f 100644 --- a/src/backends/cl/test/OpenClTimerTest.cpp +++ b/src/backends/cl/test/OpenClTimerTest.cpp @@ -54,9 
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 3ca415a1d1..665791a36a 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -53,32 +53,32 @@ BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
 
 BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
 {
-    MergerDim0EndToEnd<float>(defaultBackends);
+    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
 {
-    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
 {
-    MergerDim1EndToEnd<float>(defaultBackends);
+    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
 {
-    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
 {
-    MergerDim3EndToEnd<float>(defaultBackends);
+    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
 {
-    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 09b47e5b3e..3d34934d06 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -440,17 +440,17 @@ ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhw
 ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
 
 ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
-                                 CompareDepthwiseConvolution2dTest<float>,
+                                 CompareDepthwiseConvolution2dFloatTest,
                                  armnn::DataLayout::NCHW)
 ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
-                                 CompareDepthwiseConvolution2dTest<uint8_t>,
+                                 CompareDepthwiseConvolution2dUint8Test,
                                  armnn::DataLayout::NCHW)
 ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
-                                 CompareDepthwiseConvolution2dTest<float>,
+                                 CompareDepthwiseConvolution2dFloatTest,
                                  armnn::DataLayout::NHWC)
 ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
-                                 CompareDepthwiseConvolution2dTest<uint8_t>,
+                                 CompareDepthwiseConvolution2dUint8Test,
                                  armnn::DataLayout::NHWC)
 
 ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest,
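On both the CL and NEON suites the templated CompareDepthwiseConvolution2dTest is replaced by explicitly named per-type entry points, which keeps template arguments out of the ARMNN_COMPARE_REF_AUTO_TEST_CASE macro. One plausible shape for such a wrapper, forwarding to a shared templated implementation (the Impl name and exact parameter list are assumptions, not taken from this patch):

    // Hypothetical forwarding wrapper; the real declarations live in LayerTests.hpp.
    LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        armnn::IWorkloadFactory& refWorkloadFactory,
        const armnn::DataLayout layout)
    {
        // Fix the DataType once here so the test macro stays template-free.
        return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
            workloadFactory, memoryManager, refWorkloadFactory, layout);
    }

The Uint8 counterpart would differ only in its name, its return type (LayerTestResult<uint8_t, 4>) and the DataType::QuantisedAsymm8 template argument.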
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index a9b3193692..d2d4460341 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -59,10 +59,10 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
     unsigned int outputBatchSize = inputBatchSize;
 
     armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                      armnn::GetDataType<float>());
+                                      armnn::DataType::Float32);
     armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                       armnn::GetDataType<float>());
+                                       armnn::DataType::Float32);
 
     LayerTestResult<float, 4> result(inputTensorInfo);
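The reference end-to-end suite below applies the same convention to the arithmetic and merger helpers: the DataType enum value is the single explicit template argument, and the element type of the expected-output vector follows from it. As an illustration only (this exact test does not exist in the patch), a Signed32 variant would read:

    // Hypothetical additional test case, sketched to show the call convention.
    BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Int32Test)
    {
        MergerDim1EndToEnd<armnn::DataType::Signed32>(defaultBackends);
    }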
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 9a4e60162f..4f4a161509 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -317,7 +317,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
     const std::vector<float> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                               0, 0, 0, 0,  1, 1, 1, 1 });
 
-    ArithmeticSimpleEndToEnd<float>(defaultBackends, LayerType::Equal, expectedOutput);
+    ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
@@ -325,7 +325,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
     const std::vector<float> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                               0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ArithmeticSimpleEndToEnd<float>(defaultBackends, LayerType::Greater, expectedOutput);
+    ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
@@ -333,7 +333,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0,  1, 1, 1, 1 });
 
-    ArithmeticSimpleEndToEnd<uint8_t>(defaultBackends, LayerType::Equal, expectedOutput);
+    ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
@@ -341,7 +341,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ArithmeticSimpleEndToEnd<uint8_t>(defaultBackends, LayerType::Greater, expectedOutput);
+    ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
@@ -349,7 +349,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
     const std::vector<float> expectedOutput({ 1, 0, 1, 1,  0, 0, 0, 0,
                                               0, 0, 0, 0 });
 
-    ArithmeticBroadcastEndToEnd<float>(defaultBackends, LayerType::Equal, expectedOutput);
+    ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
     const std::vector<float> expectedOutput({ 0, 1, 0, 0,  0, 1, 1, 1,
                                               1, 1, 1, 1 });
 
-    ArithmeticBroadcastEndToEnd<float>(defaultBackends, LayerType::Greater, expectedOutput);
+    ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
@@ -365,7 +365,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0 });
 
-    ArithmeticBroadcastEndToEnd<uint8_t>(defaultBackends, LayerType::Equal, expectedOutput);
+    ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
@@ -373,47 +373,47 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0,  0, 1, 1, 1,
                                                 1, 1, 1, 1 });
 
-    ArithmeticBroadcastEndToEnd<uint8_t>(defaultBackends, LayerType::Greater, expectedOutput);
+    ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
 {
-    MergerDim0EndToEnd<float>(defaultBackends);
+    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
 {
-    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
 {
-    MergerDim1EndToEnd<float>(defaultBackends);
+    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
 {
-    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
 {
-    MergerDim2EndToEnd<float>(defaultBackends);
+    MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
 {
-    MergerDim2EndToEnd<uint8_t>(defaultBackends);
+    MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
 {
-    MergerDim3EndToEnd<float>(defaultBackends);
+    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
 {
-    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+BOOST_AUTO_TEST_SUITE_END()
--
cgit v1.2.1