author    Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2019-01-22 16:10:44 +0000
committer Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2019-01-22 16:10:44 +0000
commit    649dd9515ddf4bd00a0bff64d51dfd835a6c7b39 (patch)
tree      c938bc8eb11dd24223c0cb00a57d4372a907b943
parent    382e21ce95c04479a6900afca81a57949b369f1e (diff)
download  armnn-649dd9515ddf4bd00a0bff64d51dfd835a6c7b39.tar.gz
IVGCVSW-2467 Remove GetDataType<T> function
Change-Id: I7359617a307b9abb4c30b3d5f2364dc6d0f828f0
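
This patch replaces the function template armnn::GetDataType<T>(), which mapped a C++ type to an armnn::DataType at compile time, with explicit armnn::DataType non-type template parameters; where a C++ storage type is still needed, it is derived with armnn::ResolveType. A minimal sketch of the migration pattern, with illustrative caller code that is not taken verbatim from this patch:

    // Before: the DataType was deduced from the C++ element type.
    armnn::TensorInfo oldInfo({ 1, 4 }, armnn::GetDataType<float>());

    // After: the caller names the DataType directly...
    armnn::TensorInfo newInfo({ 1, 4 }, armnn::DataType::Float32);

    // ...and recovers the storage type from it where one is needed.
    using T = armnn::ResolveType<armnn::DataType::Float32>;  // T is float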
-rw-r--r--  CMakeLists.txt                                               |    1
-rw-r--r--  include/armnn/TypesUtils.hpp                                 |   46
-rw-r--r--  src/armnn/CompatibleTypes.hpp                                |   44
-rw-r--r--  src/armnn/test/UtilsTests.cpp                                |   11
-rw-r--r--  src/armnnTfLiteParser/test/Activations.cpp                   |    8
-rw-r--r--  src/armnnTfLiteParser/test/Addition.cpp                      |   10
-rw-r--r--  src/armnnTfLiteParser/test/AvgPool2D.cpp                     |    9
-rw-r--r--  src/armnnTfLiteParser/test/Concatenation.cpp                 |   97
-rw-r--r--  src/armnnTfLiteParser/test/Conv2D.cpp                        |    8
-rw-r--r--  src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp        |    6
-rw-r--r--  src/armnnTfLiteParser/test/FullyConnected.cpp                |    4
-rw-r--r--  src/armnnTfLiteParser/test/MaxPool2D.cpp                     |    9
-rw-r--r--  src/armnnTfLiteParser/test/Mean.cpp                          |    5
-rw-r--r--  src/armnnTfLiteParser/test/Multiplication.cpp                |   25
-rw-r--r--  src/armnnTfLiteParser/test/Pad.cpp                           |   14
-rw-r--r--  src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp      |   38
-rw-r--r--  src/armnnTfLiteParser/test/Reshape.cpp                       |   24
-rw-r--r--  src/armnnTfLiteParser/test/Softmax.cpp                       |    3
-rw-r--r--  src/armnnTfLiteParser/test/Squeeze.cpp                       |    4
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp                               |    2
-rw-r--r--  src/armnnUtils/TensorUtils.cpp                               |   19
-rw-r--r--  src/armnnUtils/TensorUtils.hpp                               |   20
-rw-r--r--  src/backends/backendsCommon/CpuTensorHandle.hpp              |    5
-rw-r--r--  src/backends/backendsCommon/test/ActivationTestImpl.hpp      |   81
-rw-r--r--  src/backends/backendsCommon/test/ArithmeticTestImpl.hpp      |   10
-rw-r--r--  src/backends/backendsCommon/test/BatchNormTestImpl.hpp       |   17
-rwxr-xr-x  src/backends/backendsCommon/test/Conv2dTestImpl.hpp          |  108
-rw-r--r--  src/backends/backendsCommon/test/DebugTestImpl.hpp           |   24
-rw-r--r--  src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp  |   11
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp              | 1364
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp              |   12
-rw-r--r--  src/backends/backendsCommon/test/LstmTestImpl.hpp            |   64
-rw-r--r--  src/backends/backendsCommon/test/MergerTestImpl.hpp          |   21
-rw-r--r--  src/backends/backendsCommon/test/Pooling2dTestImpl.hpp       |  177
-rw-r--r--  src/backends/backendsCommon/test/SoftmaxTestImpl.hpp         |   12
-rw-r--r--  src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp  |   40
-rw-r--r--  src/backends/backendsCommon/test/SplitterTestImpl.hpp        |   16
-rw-r--r--  src/backends/backendsCommon/test/StridedSliceTestImpl.hpp    |   55
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp                     |   12
-rwxr-xr-x  src/backends/cl/test/ClLayerTests.cpp                        |    8
-rw-r--r--  src/backends/cl/test/OpenClTimerTest.cpp                     |    8
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp                 |   12
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                    |    8
-rw-r--r--  src/backends/neon/test/NeonTimerTest.cpp                     |    4
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp             |   34
45 files changed, 1318 insertions(+), 1192 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 87d91b96b4..682e2cf688 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -255,6 +255,7 @@ list(APPEND armnn_sources
src/armnn/layers/SubtractionLayer.cpp
src/armnn/layers/SubtractionLayer.hpp
src/armnn/BackendSettings.hpp
+ src/armnn/CompatibleTypes.hpp
src/armnn/Descriptors.cpp
src/armnn/DeviceSpec.hpp
src/armnn/Exceptions.cpp
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 8c4ceb8d4f..3ed1dfbcb5 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -151,45 +151,6 @@ struct IsHalfType
: std::integral_constant<bool, std::is_floating_point<T>::value && sizeof(T) == 2>
{};
-template<typename T, typename U=T>
-struct GetDataTypeImpl;
-
-template<typename T>
-struct GetDataTypeImpl<T, typename std::enable_if_t<IsHalfType<T>::value, T>>
-{
- static constexpr DataType Value = DataType::Float16;
-};
-
-template<>
-struct GetDataTypeImpl<float>
-{
- static constexpr DataType Value = DataType::Float32;
-};
-
-template<>
-struct GetDataTypeImpl<uint8_t>
-{
- static constexpr DataType Value = DataType::QuantisedAsymm8;
-};
-
-template<>
-struct GetDataTypeImpl<int32_t>
-{
- static constexpr DataType Value = DataType::Signed32;
-};
-
-template<>
-struct GetDataTypeImpl<bool>
-{
- static constexpr DataType Value = DataType::Boolean;
-};
-
-template <typename T>
-constexpr DataType GetDataType()
-{
- return GetDataTypeImpl<T>::Value;
-}
-
template<typename T>
constexpr bool IsQuantizedType()
{
@@ -257,16 +218,15 @@ inline float Dequantize(QuantizedType value, float scale, int32_t offset)
return dequantized;
}
-template <typename DataType>
+template <armnn::DataType DataType>
void VerifyTensorInfoDataType(const armnn::TensorInfo & info)
{
- auto expectedType = armnn::GetDataType<DataType>();
- if (info.GetDataType() != expectedType)
+ if (info.GetDataType() != DataType)
{
std::stringstream ss;
ss << "Unexpected datatype:" << armnn::GetDataTypeName(info.GetDataType())
<< " for tensor:" << info.GetShape()
- << ". The type expected to be: " << armnn::GetDataTypeName(expectedType);
+ << ". The type expected to be: " << armnn::GetDataTypeName(DataType);
throw armnn::Exception(ss.str());
}
}
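
With the data type now a non-type template parameter, verification reduces to a direct enum comparison. A small sketch of the new call shape (tensor shape and types chosen purely for illustration):

    armnn::TensorInfo info({ 2, 2 }, armnn::DataType::Float32);
    armnn::VerifyTensorInfoDataType<armnn::DataType::Float32>(info);      // passes
    // armnn::VerifyTensorInfoDataType<armnn::DataType::Signed32>(info);  // would throw armnn::Exception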
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
new file mode 100644
index 0000000000..2449876544
--- /dev/null
+++ b/src/armnn/CompatibleTypes.hpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/Types.hpp"
+#include "Half.hpp"
+
+namespace armnn
+{
+
+template<typename T>
+bool CompatibleTypes(DataType dataType)
+{
+ return false;
+}
+
+template<>
+inline bool CompatibleTypes<float>(DataType dataType)
+{
+ return dataType == DataType::Float32;
+}
+
+template<>
+inline bool CompatibleTypes<Half>(DataType dataType)
+{
+ return dataType == DataType::Float16;
+}
+
+template<>
+inline bool CompatibleTypes<uint8_t>(DataType dataType)
+{
+ return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
+}
+
+template<>
+inline bool CompatibleTypes<int32_t>(DataType dataType)
+{
+ return dataType == DataType::Signed32;
+}
+
+} //namespace armnn
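
Unlike the removed one-to-one GetDataType<T> mapping, CompatibleTypes<T> is deliberately many-to-one: uint8_t storage is accepted for both Boolean and QuantisedAsymm8 tensors. Expected behaviour, assuming the specialisations above:

    armnn::CompatibleTypes<float>(armnn::DataType::Float32);            // true
    armnn::CompatibleTypes<uint8_t>(armnn::DataType::QuantisedAsymm8);  // true
    armnn::CompatibleTypes<uint8_t>(armnn::DataType::Boolean);          // true: Boolean tensors are byte-backed
    armnn::CompatibleTypes<int32_t>(armnn::DataType::Float32);          // false: falls through to the primary template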
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 9933137edc..c81a4b67b6 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -23,14 +23,6 @@ BOOST_AUTO_TEST_CASE(DataTypeSize)
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
}
-BOOST_AUTO_TEST_CASE(GetDataTypeTest)
-{
- BOOST_TEST((armnn::GetDataType<float>() == armnn::DataType::Float32));
- BOOST_TEST((armnn::GetDataType<uint8_t>() == armnn::DataType::QuantisedAsymm8));
- BOOST_TEST((armnn::GetDataType<int32_t>() == armnn::DataType::Signed32));
- BOOST_TEST((armnn::GetDataType<bool>() == armnn::DataType::Boolean));
-}
-
BOOST_AUTO_TEST_CASE(PermuteDescriptorWithTooManyMappings)
{
BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u }), armnn::InvalidArgumentException);
@@ -81,9 +73,6 @@ BOOST_AUTO_TEST_CASE(HalfType)
constexpr bool isHalfType = std::is_same<armnn::Half, ResolvedType>::value;
BOOST_CHECK(isHalfType);
- armnn::DataType dt = armnn::GetDataType<armnn::Half>();
- BOOST_CHECK(dt == armnn::DataType::Float16);
-
//Test utility functions return correct size
BOOST_CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
diff --git a/src/armnnTfLiteParser/test/Activations.cpp b/src/armnnTfLiteParser/test/Activations.cpp
index a30d46408c..534ae4cb73 100644
--- a/src/armnnTfLiteParser/test/Activations.cpp
+++ b/src/armnnTfLiteParser/test/Activations.cpp
@@ -70,8 +70,8 @@ struct ReLuFixture : ActivationFixture
};
BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
{
- RunTest<2, float>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
- { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
+ RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
}
struct ReLu6Fixture : ActivationFixture
@@ -80,8 +80,8 @@ struct ReLu6Fixture : ActivationFixture
};
BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
{
- RunTest<2, float>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
- { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
+ RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp
index 53a0c40337..94389d3134 100644
--- a/src/armnnTfLiteParser/test/Addition.cpp
+++ b/src/armnnTfLiteParser/test/Addition.cpp
@@ -97,11 +97,11 @@ struct SimpleAddFixture : AddFixture
BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
{
- RunTest<2, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 4, 6, 8, 10 }}});
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 4, 6, 8, 10 }}});
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
index 2fac9079c8..a39c088d44 100644
--- a/src/armnnTfLiteParser/test/AvgPool2D.cpp
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -98,22 +98,23 @@ struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
{
- RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
{
- RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
+ RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
{
- RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
}
BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp
index 8629efe3d7..bb5aebf39c 100644
--- a/src/armnnTfLiteParser/test/Concatenation.cpp
+++ b/src/armnnTfLiteParser/test/Concatenation.cpp
@@ -100,10 +100,11 @@ struct ConcatenationFixtureNegativeDim : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
}
struct ConcatenationFixtureNCHW : ConcatenationFixture
@@ -113,10 +114,11 @@ struct ConcatenationFixtureNCHW : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
}
struct ConcatenationFixtureNHWC : ConcatenationFixture
@@ -126,10 +128,11 @@ struct ConcatenationFixtureNHWC : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}});
}
struct ConcatenationFixtureDim1 : ConcatenationFixture
@@ -139,15 +142,16 @@ struct ConcatenationFixtureDim1 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
{
- RunTest<4, uint8_t>(0,
- { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
- { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } },
- { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
- 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
+ { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } },
+ { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } });
}
struct ConcatenationFixtureDim3 : ConcatenationFixture
@@ -157,31 +161,32 @@ struct ConcatenationFixtureDim3 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
{
- RunTest<4, uint8_t>(0,
- { { "inputTensor1", { 0, 1, 2, 3,
- 4, 5, 6, 7,
- 8, 9, 10, 11,
- 12, 13, 14, 15,
- 16, 17, 18, 19,
- 20, 21, 22, 23 } },
- { "inputTensor2", { 50, 51, 52, 53,
- 54, 55, 56, 57,
- 58, 59, 60, 61,
- 62, 63, 64, 65,
- 66, 67, 68, 69,
- 70, 71, 72, 73 } } },
- { { "outputTensor", { 0, 1, 2, 3,
- 50, 51, 52, 53,
- 4, 5, 6, 7,
- 54, 55, 56, 57,
- 8, 9, 10, 11,
- 58, 59, 60, 61,
- 12, 13, 14, 15,
- 62, 63, 64, 65,
- 16, 17, 18, 19,
- 66, 67, 68, 69,
- 20, 21, 22, 23,
- 70, 71, 72, 73 } } });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { { "inputTensor1", { 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15,
+ 16, 17, 18, 19,
+ 20, 21, 22, 23 } },
+ { "inputTensor2", { 50, 51, 52, 53,
+ 54, 55, 56, 57,
+ 58, 59, 60, 61,
+ 62, 63, 64, 65,
+ 66, 67, 68, 69,
+ 70, 71, 72, 73 } } },
+ { { "outputTensor", { 0, 1, 2, 3,
+ 50, 51, 52, 53,
+ 4, 5, 6, 7,
+ 54, 55, 56, 57,
+ 8, 9, 10, 11,
+ 58, 59, 60, 61,
+ 12, 13, 14, 15,
+ 62, 63, 64, 65,
+ 16, 17, 18, 19,
+ 66, 67, 68, 69,
+ 20, 21, 22, 23,
+ 70, 71, 72, 73 } } });
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 79bef733c9..38c6675ddb 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -89,7 +89,7 @@ struct SimpleConv2DFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2, 3,
@@ -219,7 +219,7 @@ struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
@@ -290,7 +290,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture
uint8_t outZero = 20;
uint8_t fz = 4; // filter zero point
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
@@ -331,7 +331,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixtu
{
uint8_t relu6Min = 6 / 2; // divide by output scale
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index e8262f8313..c0767801b3 100644
--- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -133,7 +133,7 @@ struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
@@ -160,7 +160,7 @@ struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
@@ -185,7 +185,7 @@ struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index 14ca57c2ab..7ee64a476e 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -125,7 +125,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, uint8_t>(
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
0,
{ 10, 20, 30, 40 },
{ 400/2 });
@@ -145,7 +145,7 @@ struct FullyConnectedWithBiasFixture : FullyConnectedFixture
BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
{
- RunTest<2, uint8_t>(
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
0,
{ 10, 20, 30, 40 },
{ (400+10)/2 });
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
index 06bf7806cc..759fc37ccd 100644
--- a/src/armnnTfLiteParser/test/MaxPool2D.cpp
+++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp
@@ -98,22 +98,23 @@ struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
{
- RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
{
- RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
+ RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
{
- RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Mean.cpp b/src/armnnTfLiteParser/test/Mean.cpp
index d7cb2a20f0..3f0fdf14d0 100644
--- a/src/armnnTfLiteParser/test/Mean.cpp
+++ b/src/armnnTfLiteParser/test/Mean.cpp
@@ -91,9 +91,8 @@ struct SimpleMeanNoReduceFixture : public MeanNoReduceFixture
BOOST_FIXTURE_TEST_CASE(ParseMeanNoReduce, SimpleMeanNoReduceFixture)
{
- RunTest<2, float>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
- {{ "outputTensor", { 1.5f } } });
+ RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
+ {{ "outputTensor", { 1.5f } } });
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/Multiplication.cpp b/src/armnnTfLiteParser/test/Multiplication.cpp
index 802799c2b4..f7e2edd546 100644
--- a/src/armnnTfLiteParser/test/Multiplication.cpp
+++ b/src/armnnTfLiteParser/test/Multiplication.cpp
@@ -94,19 +94,18 @@ struct SimpleMultiplicationFixture : public MultiplicationFixture
BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture)
{
- RunTest<4, float>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f,
- 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f } },
- { "inputTensor2", { 1.0f, 1.0f, 1.0f,
- 5.0f, 5.0f, 5.0f,
- 1.0f, 1.0f, 1.0f,
- 5.0f, 5.0f, 5.0f} } },
- {{ "outputTensor", { 0.0f, 1.0f, 2.0f,
- 15.0f, 20.0f, 25.0f,
- 6.0f, 7.0f, 8.0f,
- 45.0f, 50.0f, 55.0f } } });
+ RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } },
+ { "inputTensor2", { 1.0f, 1.0f, 1.0f,
+ 5.0f, 5.0f, 5.0f,
+ 1.0f, 1.0f, 1.0f,
+ 5.0f, 5.0f, 5.0f} } },
+ {{ "outputTensor", { 0.0f, 1.0f, 2.0f,
+ 15.0f, 20.0f, 25.0f,
+ 6.0f, 7.0f, 8.0f,
+ 45.0f, 50.0f, 55.0f } } });
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
index 09b744a7ce..bdc8478ca2 100644
--- a/src/armnnTfLiteParser/test/Pad.cpp
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -92,13 +92,13 @@ struct SimplePadFixture : public PadFixture
BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
{
- RunTest<2, float>(0,
- {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
- {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
+ RunTest<2, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
+ {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index b372a604f3..8d0ee01aa9 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -14,6 +14,7 @@
#include <armnn/TypesUtils.hpp>
#include "test/TensorHelpers.hpp"
+#include "TypeUtils.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include <backendsCommon/BackendRegistry.hpp>
@@ -116,14 +117,18 @@ struct ParserFlatbuffersFixture
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This overload assumes the network has a single input and a single output.
- template <std::size_t NumOutputDimensions, typename DataType>
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType = armnn::ResolveType<ArmnnType>>
void RunTest(size_t subgraphId,
- const std::vector<DataType>& inputData,
- const std::vector<DataType>& expectedOutputData);
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData);
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
- template <std::size_t NumOutputDimensions, typename DataType>
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType = armnn::ResolveType<ArmnnType>>
void RunTest(size_t subgraphId,
const std::map<std::string, std::vector<DataType>>& inputData,
const std::map<std::string, std::vector<DataType>>& expectedOutputData);
@@ -152,21 +157,24 @@ struct ParserFlatbuffersFixture
}
};
-template <std::size_t NumOutputDimensions, typename DataType>
+template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
const std::vector<DataType>& inputData,
const std::vector<DataType>& expectedOutputData)
{
- RunTest<NumOutputDimensions, DataType>(subgraphId,
- { { m_SingleInputName, inputData } },
- { { m_SingleOutputName, expectedOutputData } });
+ RunTest<NumOutputDimensions, ArmnnType>(subgraphId,
+ { { m_SingleInputName, inputData } },
+ { { m_SingleOutputName, expectedOutputData } });
}
-template <std::size_t NumOutputDimensions, typename DataType>
-void
-ParserFlatbuffersFixture::RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType>>& inputData,
- const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData)
{
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
@@ -175,7 +183,7 @@ ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : inputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
@@ -185,7 +193,7 @@ ParserFlatbuffersFixture::RunTest(size_t subgraphId,
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
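
The fixture's RunTest now takes the armnn::DataType as a template argument and defaults the C++ element type from it via armnn::ResolveType, which is why the call sites in this patch change from RunTest<2, float> to RunTest<2, armnn::DataType::Float32>. Inside a fixture, a call now looks like this (values illustrative):

    // DataType defaults to armnn::ResolveType<armnn::DataType::Float32>, i.e. float.
    RunTest<2, armnn::DataType::Float32>(0, { 1.0f, 2.0f, 3.0f }, { 1.0f, 2.0f, 3.0f });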
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index ae5a09a711..ef4b761945 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -86,9 +86,9 @@ struct ReshapeFixtureWithReshapeDims : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({3,3})));
}
@@ -101,9 +101,9 @@ struct ReshapeFixtureWithReshapeDimsFlatten : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({1,9})));
}
@@ -116,9 +116,9 @@ struct ReshapeFixtureWithReshapeDimsFlattenTwoDims : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,9})));
}
@@ -131,9 +131,9 @@ struct ReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, uint8_t>(0,
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,3,3})));
}
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
index 957e61b944..dacd946352 100644
--- a/src/armnnTfLiteParser/test/Softmax.cpp
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -71,8 +71,7 @@ struct SoftmaxFixture : public ParserFlatbuffersFixture
BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
{
- RunTest<2, uint8_t>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
}
BOOST_AUTO_TEST_SUITE_END()
-
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
index 5ee74243c4..7f6fb276fc 100644
--- a/src/armnnTfLiteParser/test/Squeeze.cpp
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -85,7 +85,7 @@ struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2,1})));
@@ -99,7 +99,7 @@ struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2})));
}
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 0087ef83bf..15a91d5275 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -866,7 +866,7 @@ public:
m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
m_TensorInfo(tensorInfo)
{
- BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
+ BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
}
void CreateLayerDeferred() override
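
TfParser has no DataType template parameter threaded through here, so the assert is relaxed to a storage-size check instead: any element type of matching width now passes, where the old check required an exact type match. A hedged illustration of what the new condition accepts:

    static_assert(sizeof(float) == 4, "assumes a 4-byte float, as on typical armnn targets");
    BOOST_ASSERT(armnn::GetDataTypeSize(armnn::DataType::Float32) == sizeof(float));  // holds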
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 2c25eec163..57f823fe13 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -27,5 +27,24 @@ armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
}
}
+armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
+ unsigned int numberOfChannels,
+ unsigned int height,
+ unsigned int width,
+ const armnn::DataLayout dataLayout,
+ const armnn::DataType dataType)
+{
+ switch (dataLayout)
+ {
+ case armnn::DataLayout::NCHW:
+ return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
+ case armnn::DataLayout::NHWC:
+ return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
+ default:
+ throw armnn::InvalidArgumentException("Unknown data layout ["
+ + std::to_string(static_cast<int>(dataLayout)) +
+ "]", CHECK_LOCATION());
+ }
}
+}
diff --git a/src/armnnUtils/TensorUtils.hpp b/src/armnnUtils/TensorUtils.hpp
index 6461b37f75..fb5e6eb10d 100644
--- a/src/armnnUtils/TensorUtils.hpp
+++ b/src/armnnUtils/TensorUtils.hpp
@@ -15,23 +15,11 @@ armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
unsigned int width,
const armnn::DataLayout dataLayout);
-template<typename T>
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
unsigned int numberOfChannels,
unsigned int height,
unsigned int width,
- const armnn::DataLayout dataLayout)
-{
- switch (dataLayout)
- {
- case armnn::DataLayout::NCHW:
- return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
- case armnn::DataLayout::NHWC:
- return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
- default:
- throw armnn::InvalidArgumentException("Unknown data layout ["
- + std::to_string(static_cast<int>(dataLayout)) +
- "]", CHECK_LOCATION());
- }
-}
-} // namespace armnnUtils
\ No newline at end of file
+ const armnn::DataLayout dataLayout,
+ const armnn::DataType dataType);
+
+} // namespace armnnUtils
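
GetTensorInfo is no longer a template; the data type travels as an ordinary argument alongside the layout, matching the out-of-line definition added to TensorUtils.cpp above. A usage sketch with illustrative dimensions:

    // NCHW keeps {batches, channels, height, width}; NHWC reorders to {batches, height, width, channels}.
    armnn::TensorInfo nhwcInfo = armnnUtils::GetTensorInfo(
        1, 3, 224, 224, armnn::DataLayout::NHWC, armnn::DataType::Float32);
    // nhwcInfo.GetShape() == armnn::TensorShape({ 1, 224, 224, 3 })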
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index b88a0d385b..dd6413f2e7 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -5,6 +5,7 @@
#pragma once
#include "CpuTensorHandleFwd.hpp"
+#include "CompatibleTypes.hpp"
#include <armnn/TypesUtils.hpp>
@@ -22,7 +23,7 @@ public:
template <typename T>
const T* GetConstTensor() const
{
- BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>());
+ BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<const T*>(m_Memory);
}
@@ -82,7 +83,7 @@ public:
template <typename T>
T* GetTensor() const
{
- BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>());
+ BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<T*>(m_MutableMemory);
}
diff --git a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
index 46c700ce02..ca6130299b 100644
--- a/src/backends/backendsCommon/test/ActivationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
@@ -19,7 +19,7 @@
#include <algorithm>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -41,11 +41,9 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
unsigned int outputChannels = inputChannels;
unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
if(armnn::IsQuantizedType<T>())
{
@@ -115,7 +113,7 @@ LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
0.999f, 1.0f, 0.89f, 1.0f,
};
- return BoundedReLuTestCommon(
+ return BoundedReLuTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
inputWidth, inputHeight, inputChannels, inputBatchSize);
}
@@ -146,7 +144,7 @@ LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
0.999f, 1.2f, 0.89f, 6.0f,
};
- return BoundedReLuTestCommon(
+ return BoundedReLuTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
inputWidth, inputHeight, inputChannels, inputBatchSize);
}
@@ -176,10 +174,10 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
float outputScale = 6.0f / 255.0f;
int32_t outputOffset = 0;
- return BoundedReLuTestCommon(workloadFactory, memoryManager, 6.0f, 0.0f,
- inputScale, inputOffset, outputScale, outputOffset,
- input, output,
- inputWidth, inputHeight, inputChannels, inputBatchSize);
+ return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 6.0f, 0.0f,
+ inputScale, inputOffset, outputScale, outputOffset,
+ input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}
LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
@@ -205,10 +203,10 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
int32_t inputOffset = 112;
float inputScale = 0.0125f;
- return BoundedReLuTestCommon(workloadFactory, memoryManager, 1.0f, -1.0f,
- inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
- input, output,
- inputWidth, inputHeight, inputChannels, inputBatchSize);
+ return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -1.0f,
+ inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
+ input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}
namespace
@@ -303,7 +301,7 @@ LayerTestResult<float, 4> CompareBoundedReLuTest(
return result;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -320,8 +318,8 @@ LayerTestResult<T,4> ConstantLinearActivationTestCommon(
unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
- inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -368,17 +366,18 @@ LayerTestResult<float, 4> ConstantLinearActivationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<float>(workloadFactory, memoryManager);
+ return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<uint8_t>(workloadFactory, memoryManager, 4.0f, 3);
+ return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 4.0f, 3);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -400,10 +399,8 @@ LayerTestResult<T, 4> SimpleActivationTest(
constexpr static unsigned int outputChannels = inputChannels;
constexpr static unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -448,7 +445,7 @@ LayerTestResult<T, 4> SimpleActivationTest(
return result;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -470,32 +467,32 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon(
std::vector<float> outputExpectedData(inputData.size());
std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
- return SimpleActivationTest<T>(workloadFactory,
- memoryManager,
- armnn::ActivationFunction::Sigmoid,
- 0.f,
- 0.f,
- qScale,
- qOffset,
- inputData,
- outputExpectedData);
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::Sigmoid,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
}
LayerTestResult<float, 4> SimpleSigmoidTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<uint8_t>(workloadFactory, memoryManager, 0.1f, 50);
+ return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -517,8 +514,8 @@ LayerTestResult<T,4> CompareActivationTestImpl(
unsigned int shape[] = {batchSize, channels, height, width};
- inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -596,7 +593,7 @@ LayerTestResult<float,4> CompareActivationTest(
armnn::ActivationFunction f,
unsigned int batchSize)
{
- return CompareActivationTestImpl<float>(
+ return CompareActivationTestImpl<armnn::DataType::Float32>(
workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
}
@@ -606,6 +603,6 @@ LayerTestResult<uint8_t,4> CompareActivationUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f)
{
- return CompareActivationTestImpl<uint8_t>(
+ return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}
diff --git a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp
index f70bf48ca9..1d6cf1d99b 100644
--- a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp
@@ -4,6 +4,8 @@
//
#pragma once
+#include "TypeUtils.hpp"
+
#include <armnn/INetwork.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
@@ -49,7 +51,7 @@ INetworkPtr CreateArithmeticNetwork(const std::vector<TensorShape>& inputShapes,
return net;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ArithmeticSimpleEndToEnd(const std::vector<BackendId>& backends,
const LayerType type,
const std::vector<T> expectedOutput)
@@ -60,7 +62,7 @@ void ArithmeticSimpleEndToEnd(const std::vector<BackendId>& backends,
const TensorShape& outputShape = { 2, 2, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateArithmeticNetwork<GetDataType<T>()>(inputShapes, outputShape, type);
+ INetworkPtr net = CreateArithmeticNetwork<ArmnnType>(inputShapes, outputShape, type);
BOOST_TEST_CHECKPOINT("create a network");
@@ -76,7 +78,7 @@ void ArithmeticSimpleEndToEnd(const std::vector<BackendId>& backends,
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ArithmeticBroadcastEndToEnd(const std::vector<BackendId>& backends,
const LayerType type,
const std::vector<T> expectedOutput)
@@ -87,7 +89,7 @@ void ArithmeticBroadcastEndToEnd(const std::vector<BackendId>& backends,
const TensorShape& outputShape = { 1, 2, 2, 3 };
// Builds up the structure of the network
- INetworkPtr net = CreateArithmeticNetwork<GetDataType<T>()>(inputShapes, outputShape, type);
+ INetworkPtr net = CreateArithmeticNetwork<ArmnnType>(inputShapes, outputShape, type);
BOOST_TEST_CHECKPOINT("create a network");
diff --git a/src/backends/backendsCommon/test/BatchNormTestImpl.hpp b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
index d63f0b5610..ded4a067b4 100644
--- a/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
@@ -4,6 +4,7 @@
//
#pragma once
+#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"
#include <armnn/ArmNN.hpp>
@@ -18,7 +19,7 @@
#include <DataLayoutIndexed.hpp>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -29,13 +30,13 @@ LayerTestResult<T, 4> BatchNormTestImpl(
int32_t qOffset,
armnn::DataLayout dataLayout)
{
- armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
- armnn::GetDataType<T>());
+ ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if (armnn::IsQuantizedType<T>())
@@ -102,7 +103,7 @@ LayerTestResult<T, 4> BatchNormTestImpl(
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> BatchNormTestNhwcImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -114,9 +115,9 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
const unsigned int channels = 2;
const unsigned int num = 1;
- armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
- armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
+ armnn::TensorInfo tensorInfo({channels}, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index 8d292c84bb..24f0825504 100755
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -6,6 +6,7 @@
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
+#include "TypeUtils.hpp"
#include <Permute.hpp>
#include <DataLayoutIndexed.hpp>
@@ -70,7 +71,8 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
}
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -115,12 +117,12 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
// Note these tensors will use two (identical) batches.
armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo<T>(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
armnn::TensorInfo kernelDesc =
- armnnUtils::GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -230,7 +232,8 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -266,11 +269,11 @@ LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
bool biasEnabled = bias.size() > 0;
// Creates the tensors.
- armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
- armnn::GetDataType<T>());
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Construct the input data.
std::vector<T> inputData;
@@ -322,7 +325,8 @@ LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -359,11 +363,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
// Creates the tensors.
armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if (armnn::IsQuantizedType<T>())
@@ -459,7 +463,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -468,6 +472,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
bool biasEnabled,
const armnn::DataLayout layout)
{
+ using B = armnn::ResolveType<ArmnnBType>;
+
unsigned int inputHeight = 3;
unsigned int inputWidth = 3;
unsigned int inputChannels = 2;
@@ -484,12 +490,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
unsigned int outputNum = inputNum;
armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth},
- armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -602,7 +608,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -611,6 +617,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
bool biasEnabled,
const armnn::DataLayout layout)
{
+ using B = armnn::ResolveType<ArmnnBType>;
+
unsigned int depthMultiplier = 2;
unsigned int inputHeight = 8;
@@ -626,13 +634,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
unsigned int outputChannels = inputChannels * depthMultiplier;
unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(
- inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(
- outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
+ inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
+ outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth},
- armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -803,7 +811,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -836,11 +845,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
// Creates the tensors.
- armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
- armnn::GetDataType<T>());
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if (armnn::IsQuantizedType<T>())
@@ -904,7 +913,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
return ret;
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> Convolution1dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -912,8 +921,7 @@ LayerTestResult<T,4> Convolution1dTestImpl(
int32_t qOffset,
bool biasEnabled)
{
- using B = typename FullyConnectedBiasTypeForInputType<T>::Type;
-
+ using B = armnn::ResolveType<ArmnnBType>;
// Until we have a specialist 1D convolution layer, we can fake one using
// 2D convolution with the final dimension set to 1.
// I don't anticipate this being particularly slow, given that convolution is implemented
@@ -928,10 +936,10 @@ LayerTestResult<T,4> Convolution1dTestImpl(
unsigned int stride = 1;
unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.
- armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo biasInfo({outputChannels}, armnn::GetDataType<B>());
+ armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
+ armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
+ armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
+ armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1023,9 +1031,7 @@ LayerTestResult<T,4> Convolution1dTestImpl(
return ret;
}
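The shape bookkeeping behind the 1D-as-2D trick commented above is worth spelling out. A sketch with the stride fixed at 1, as in the test; the concrete operand values are hypothetical, chosen only to be consistent with the outputSize of 7 above:

// Sketch: output length when a 1D convolution is faked as a 2D one
// whose trailing (width) dimension is 1. Stride fixed at 1.
constexpr unsigned int Conv1dOutputSize(unsigned int inputSize,
                                        unsigned int padSize,
                                        unsigned int kernelSize)
{
    return inputSize + 2 * padSize - kernelSize + 1;
}

// Hypothetical operands consistent with outputSize == 7 in the test above.
static_assert(Conv1dOutputSize(5, 2, 3) == 7, "matches the test's outputSize");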
-
-
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1059,10 +1065,10 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
unsigned int biasShape[] = {outputChannels};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
- kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType<T>());
- biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+ kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
+ biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
LayerTestResult<T,4> ret(outputTensorInfo);
@@ -1123,7 +1129,7 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
return ret;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1178,11 +1184,11 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
int32_t qOffset = 0;
- inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
- outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), armnn::GetDataType<T>(), outputQScale, qOffset);
- kernelDesc = armnn::TensorInfo(4, kernelShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
+ inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
+ kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
biasDesc = armnn::TensorInfo(
- 1, biasShape.data(), armnn::GetBiasDataType(armnn::GetDataType<T>()), inputsQScale, qOffset);
+ 1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
LayerTestResult<T, 4> ret(outputTensorInfo);
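The armnn::GetBiasDataType call above derives the bias data type from the input data type at runtime, rather than from a second C++ template parameter. A sketch of the mapping it is expected to perform, matching the explicit ArmnnBType pairs used at the call sites later in this patch (the real function may cover further types such as Float16):

// Sketch of the assumed input-type -> bias-type mapping.
armnn::DataType GetBiasDataTypeSketch(armnn::DataType inputDataType)
{
    switch (inputDataType)
    {
        case armnn::DataType::Float32:
            return armnn::DataType::Float32;  // float bias for float input
        case armnn::DataType::QuantisedAsymm8:
            return armnn::DataType::Signed32; // int32 bias for quantized input
        default:
            throw armnn::InvalidArgumentException("Unsupported input data type");
    }
}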
diff --git a/src/backends/backendsCommon/test/DebugTestImpl.hpp b/src/backends/backendsCommon/test/DebugTestImpl.hpp
index d112054198..14808f4856 100644
--- a/src/backends/backendsCommon/test/DebugTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DebugTestImpl.hpp
@@ -80,7 +80,7 @@ LayerTestResult<T, Dim> DebugTestImpl(
return ret;
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Debug4DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -95,8 +95,8 @@ LayerTestResult<T, 4> Debug4DTest(
desc.m_Parameters.m_LayerName = "TestOutput";
desc.m_Parameters.m_SlotIndex = 1;
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -131,7 +131,7 @@ LayerTestResult<T, 4> Debug4DTest(
expectedStringOutput);
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Debug3DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -145,8 +145,8 @@ LayerTestResult<T, 3> Debug3DTest(
armnn::DebugQueueDescriptor desc;
desc.m_Parameters.m_LayerName = "TestOutput";
- inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -179,7 +179,7 @@ LayerTestResult<T, 3> Debug3DTest(
expectedStringOutput);
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Debug2DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -193,8 +193,8 @@ LayerTestResult<T, 2> Debug2DTest(
armnn::DebugQueueDescriptor desc;
desc.m_Parameters.m_LayerName = "TestOutput";
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -225,7 +225,7 @@ LayerTestResult<T, 2> Debug2DTest(
expectedStringOutput);
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 1> Debug1DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -239,8 +239,8 @@ LayerTestResult<T, 1> Debug1DTest(
armnn::DebugQueueDescriptor desc;
desc.m_Parameters.m_LayerName = "TestOutput";
- inputTensorInfo = armnn::TensorInfo(1, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(1, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
diff --git a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
index e7c0f01cc9..cfdae63c26 100644
--- a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
@@ -3,6 +3,7 @@
// SPDX-License-Identifier: MIT
//
+#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"
#include <backendsCommon/IBackendInternal.hpp>
@@ -220,7 +221,7 @@ LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -252,10 +253,10 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
unsigned int biasShape[] = { outputChannels };
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
- weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType<T>());
- biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
+ weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
+ biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 0bf56e2445..3c78c82b6e 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5,6 +5,7 @@
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
+#include "TypeUtils.hpp"
#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
@@ -75,12 +76,12 @@ static std::vector<float> ConvInput3x8x16({
static std::vector<float> Bias2({0, 2});
// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
if(biasEnabled)
{
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
return bias;
}
@@ -90,7 +91,7 @@ boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffse
}
}
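GetBias2 is now keyed on the bias DataType rather than the bias C++ type, so quantized call sites can request Signed32 explicitly. A hypothetical pair of calls mirroring the convolution tests below (when biasEnabled is false the helper presumably returns an empty array, as in the elided else branch):

// Hypothetical usage of the re-templated helper.
auto floatBias = GetBias2<armnn::DataType::Float32>(true, 0.0f, 0);   // boost::multi_array<float, 1>
auto int32Bias = GetBias2<armnn::DataType::Signed32>(true, 0.5f, 50); // boost::multi_array<int32_t, 1>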
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -100,11 +101,11 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
const armnn::DataLayout layout)
{
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
// Use a 2-element batch with 3-channel 3x5 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
1, 1, 1,
@@ -146,7 +147,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
})));
// Expected output is 1 batch of a 2-channel 14x4 image.
- armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
@@ -162,18 +163,20 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout);
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout);
}
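QuantizedVector, used throughout these tests, turns a float reference vector into element type T, quantizing only when T is a quantized integral type. A behavioural sketch under that assumption (the real helper's rounding and clamping details may differ; needs <cmath> and <vector>):

// Behavioural sketch of QuantizedVector, not the actual helper.
template<typename T>
std::vector<T> QuantizedVectorSketch(float qScale, int32_t qOffset,
                                     const std::vector<float>& values)
{
    std::vector<T> result;
    result.reserve(values.size());
    for (float value : values)
    {
        if (armnn::IsQuantizedType<T>())
        {
            // Affine quantization: q = round(value / scale) + offset.
            result.push_back(static_cast<T>(std::round(value / qScale) + qOffset));
        }
        else
        {
            result.push_back(static_cast<T>(value)); // float path: pass through
        }
    }
    return result;
}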
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -185,11 +188,11 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
// Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
// Use a 2-element batch of 3-channel 3x3 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
1, 1, 1,
@@ -219,7 +222,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
})));
// Expected output is 1 batch of a 2-channel 14x6 image.
- armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
@@ -237,18 +240,19 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout);
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -259,7 +263,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
{
// Use common single-batch 1-channel 4x3 image (NHWC).
- armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
{
1, 5, 2, 3,
@@ -269,7 +273,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
// Use a single 1-channel 3x3 kernel (NHWC).
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
4, 5, 6,
0, 0, 0,
@@ -277,7 +281,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
});
// Expected output is 1 batch of a 1-channel 4x3 image (NHWC).
- armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
const std::vector<float> outputData =
{
@@ -288,18 +292,19 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
- return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
- dataLayout,
- qScale,
- qOffset);
+ return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -309,7 +314,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
const armnn::DataLayout& dataLayout)
{
// Input is a single-batch, 1 channel, 5x5 image.
- armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
{
1, 5, 2, 3, 5,
@@ -320,7 +325,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
});
// Use a 3x3 kernel.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
{
4, 5, 6,
@@ -329,7 +334,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
});
// Expected output is a single-batch, 1 channel, 3x3 image.
- armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
const std::vector<T> outputData =
{
@@ -347,21 +352,22 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
uint32_t strideX = 2;
uint32_t strideY = 2;
- return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
- dataLayout,
- qScale,
- qOffset,
- padLeft,
- padTop,
- padRight,
- padBottom,
- strideX,
- strideY);
+ return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset,
+ padLeft,
+ padTop,
+ padRight,
+ padBottom,
+ strideX,
+ strideY);
}
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
@@ -370,7 +376,8 @@ LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
@@ -379,7 +386,8 @@ LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
@@ -388,7 +396,8 @@ LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
@@ -396,12 +405,13 @@ LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
- memoryManager,
- 0.f,
- 0,
- biasEnabled,
- armnn::DataLayout::NHWC);
+ return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ biasEnabled,
+ armnn::DataLayout::NHWC);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
@@ -410,12 +420,13 @@ LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3Stride2x2TestCommon<float>(workloadFactory,
- memoryManager,
- 0.f,
- 0,
- biasEnabled,
- layout);
+ return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ biasEnabled,
+ layout);
}
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
@@ -424,10 +435,12 @@ LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -436,7 +449,7 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
int32_t qOffset)
{
// Use a single-batch 1-channel 3x3 image as input.
- armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
11,21,31,
@@ -445,7 +458,7 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
})));
// Use 1 batch of a 1-channel 2x2 kernel.
- armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-11,-21,
@@ -461,7 +474,7 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
- armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
0, 0, 0, 0, 0, 0,
@@ -474,22 +487,24 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout,
- 1, // Padding left.
- 2, // Padding top.
- 3, // Padding right.
- 4); // Padding bottom.
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(false, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout,
+ 1, // Padding left.
+ 2, // Padding top.
+ 3, // Padding right.
+ 4); // Padding bottom.
}
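The {1, 1, 8, 6} output shape above follows directly from the asymmetric padding. A worked check using the standard cross-correlation formula out = in + padBefore + padAfter - kernel + 1, assuming stride 1 as the shapes imply:

// Worked check of the expected output shape {1, 1, 8, 6}.
static_assert(3 + 2 + 4 - 2 + 1 == 8, "height: padding top 2, bottom 4");
static_assert(3 + 1 + 3 - 2 + 1 == 6, "width:  padding left 1, right 3");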
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -498,7 +513,7 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
int32_t qOffset)
{
// Use a single-batch 1-channel 5x5 image as input.
- armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
11,21,31,41,51,
@@ -509,7 +524,7 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
})));
// Use 1 batch of a 1-channel 4x4 kernel.
- armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-11,-21,-31,-41,
@@ -519,7 +534,7 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
})));
// Expected output is 1 batch of a 1-channel 5x5 image.
- armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
std::vector<T> myVec(outputDesc.GetNumElements(), 0);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
@@ -530,11 +545,12 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
-5032, -7256, -9376, -6142, -3368,
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
+ GetBias2<ArmnnBType>(false, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
@@ -545,7 +561,8 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
2); // Padding bottom.
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -555,7 +572,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
const armnn::DataLayout layout)
{
// Use a single-batch 2-channel 5x5 image as input.
- armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
0, 1, 2, 3, 4,
@@ -572,7 +589,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
})));
// Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
32, 31, 30, 29,
@@ -588,7 +605,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
// Expected output is 1 batch of a 2-channel 5x5 image.
// Calculated using the Python TensorFlow library with strideX=1, strideY=1.
- armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1062, 1580, 1850, 1530, 1117,
@@ -603,11 +620,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
3100, 4352, 4452, 3517, 2465
})));
- return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
+ return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
@@ -620,7 +638,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
1); // strideY
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -628,7 +647,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
int32_t qOffset,
bool biasEnabled)
{
- armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
0, 25,
@@ -662,7 +681,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
24, 49
})));
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
32, 31, 30, 29,
@@ -676,7 +695,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
4, 3, 2, 1
})));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1062, 1550,
@@ -710,11 +729,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
2457, 2465
})));
- return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
+ return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
@@ -732,8 +752,9 @@ Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
- workloadFactory, memoryManager, layout, 0.0f, 0);
+ return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
+ <armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, layout, 0.0f, 0);
}
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
@@ -741,7 +762,7 @@ LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
+ return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, layout, 0.0f, 0);
}
@@ -751,7 +772,7 @@ LayerTestResult<float, 4> DepthwiseConvolution2dTest(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<float, float>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
@@ -760,7 +781,8 @@ LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
@@ -769,7 +791,7 @@ LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
@@ -779,7 +801,7 @@ LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dAsymmetricTestCommon<float>(
+ return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
@@ -789,7 +811,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -799,7 +821,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
@@ -808,7 +830,8 @@ LayerTestResult<float, 4> Convolution1dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
@@ -816,7 +839,8 @@ LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
+ return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}
LayerTestResult<float,4> CompareConvolution2dTest(
@@ -824,30 +848,29 @@ LayerTestResult<float,4> CompareConvolution2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
+ return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory);
}
-template<typename T>
-LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
+LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
const armnn::DataLayout layout)
{
- return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory, layout);
}
-template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
- armnn::IWorkloadFactory&,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
- armnn::IWorkloadFactory&,
- const armnn::DataLayout);
-
-template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
- armnn::IWorkloadFactory&,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
- armnn::IWorkloadFactory&,
- const armnn::DataLayout);
+LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout)
+{
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, refWorkloadFactory, layout);
+}
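With the implementation now templated on the DataType enum, callers could no longer rely on deduction, and keeping explicit instantiations would force every call site to spell the enumerator out. The patch instead exposes two plainly named wrappers. A hypothetical caller (the function name here is illustrative) showing the resulting simplification:

// Hypothetical caller of the new named entry points.
void RunDepthwiseComparisons(armnn::IWorkloadFactory& factory,
                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memMgr,
                             armnn::IWorkloadFactory& refFactory)
{
    // Replaces CompareDepthwiseConvolution2dTest<float> / <uint8_t>:
    // the data type is now baked into the function name.
    CompareDepthwiseConvolution2dFloatTest(factory, memMgr, refFactory,
                                           armnn::DataLayout::NCHW);
    CompareDepthwiseConvolution2dUint8Test(factory, memMgr, refFactory,
                                           armnn::DataLayout::NCHW);
}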
LayerTestResult<float,4> SimpleNormalizationAcrossTest(
armnn::IWorkloadFactory& workloadFactory,
@@ -881,7 +904,7 @@ LayerTestResult<float,2> SimpleSoftmaxTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
@@ -889,7 +912,7 @@ LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
LayerTestResult<float,4> CompareNormalizationTest(
@@ -908,7 +931,8 @@ LayerTestResult<float,2> CompareSoftmaxTest(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
+ return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory, beta);
}
LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
@@ -917,46 +941,47 @@ LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
+ return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, refWorkloadFactory, beta);
}
std::vector<LayerTestResult<float,3>> SplitterTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<float>(workloadFactory, memoryManager);
+ return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 3> CopyViaSplitterTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{ 2., 3., 3., 4. }));
- armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
-0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
@@ -968,12 +993,12 @@ LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
- armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
-0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
@@ -989,12 +1014,12 @@ LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{2., 3., 3., 4.}));
- armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
-0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
@@ -1216,16 +1241,16 @@ LayerTestResult<float,4> AdditionTest(
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
if (armnn::IsQuantizedType<T>())
{
@@ -1294,16 +1319,16 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
if (armnn::IsQuantizedType<T>())
{
@@ -1371,28 +1396,32 @@ LayerTestResult<float, 4> AdditionBroadcastTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
+ return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 2.f, 0);
}
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.1333333f, 128);
}
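The quantization parameters passed here encode real values as real = scale * (q - offset), so scale 0.1333333f with offset 128 centres the uint8 range roughly around zero. A sketch of that affine dequantization (helper name is illustrative):

// Affine dequantization for QuantisedAsymm8 tensors (sketch).
inline float DequantizeSketch(uint8_t quantized, float scale, int32_t offset)
{
    return scale * (static_cast<float>(quantized) - static_cast<float>(offset));
}
// With scale 0.1333333f and offset 128:
//   q = 0   -> about -17.07f
//   q = 255 -> about  16.93f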
LayerTestResult<float,4> CompareAdditionTest(
@@ -1754,24 +1783,24 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
}
namespace {
- template <typename Descriptor, typename dataType>
- LayerTestResult<dataType, 4> ElementwiseTestHelper
+ template <typename Descriptor, armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ LayerTestResult<T, 4> ElementwiseTestHelper
(armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[4], std::vector<dataType> values0,
- const unsigned int shape1[4], std::vector<dataType> values1,
- const unsigned int outShape[4], std::vector<dataType> outValues,
+ const unsigned int shape0[4], std::vector<T> values0,
+ const unsigned int shape1[4], std::vector<T> values1,
+ const unsigned int outShape[4], std::vector<T> outValues,
float qScale = 0.0f, int qOffset = 0)
{
const size_t dimensionCount = 4;
- armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType<dataType>()};
- armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType<dataType>()};
- armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType<dataType>()};
+ armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnType};
+ armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnType};
+ armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnType};
- auto input0 = MakeTensor<dataType, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<dataType, 4>(inputTensorInfo1, values1);
+ auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
- if (armnn::IsQuantizedType<dataType>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo0.SetQuantizationScale(qScale);
inputTensorInfo0.SetQuantizationOffset(qOffset);
@@ -1783,7 +1812,7 @@ namespace {
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- LayerTestResult<dataType,4> ret(outputTensorInfo);
+ LayerTestResult<T,4> ret(outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
@@ -1807,7 +1836,7 @@ namespace {
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- ret.outputExpected = MakeTensor<dataType, 4>(outputTensorInfo, outValues);
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
return ret;
}
}
@@ -1831,15 +1860,15 @@ LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFacto
std::vector<float> output({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> EqualBroadcast1ElementTest(
@@ -1854,15 +1883,15 @@ LayerTestResult<float, 4> EqualBroadcast1ElementTest(
std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
@@ -1880,15 +1909,15 @@ LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
std::vector<float> output({ 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> EqualUint8Test(
@@ -1907,17 +1936,17 @@ LayerTestResult<uint8_t, 4> EqualUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
@@ -1935,17 +1964,17 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
@@ -1963,17 +1992,17 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
@@ -1995,15 +2024,15 @@ LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFac
std::vector<float> output({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
@@ -2018,15 +2047,15 @@ LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
@@ -2044,15 +2073,15 @@ LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
std::vector<float> output({ 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> GreaterUint8Test(
@@ -2071,17 +2100,17 @@ LayerTestResult<uint8_t, 4> GreaterUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
@@ -2099,17 +2128,17 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
@@ -2127,17 +2156,17 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
@@ -2159,15 +2188,15 @@ LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFac
std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
@@ -2182,15 +2211,15 @@ LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
@@ -2208,15 +2237,15 @@ LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
std::vector<float> output({ 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> MaximumUint8Test(
@@ -2235,17 +2264,17 @@ LayerTestResult<uint8_t, 4> MaximumUint8Test(
std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
@@ -2263,17 +2292,17 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
@@ -2291,17 +2320,17 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
7, 10, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
@@ -2316,14 +2345,15 @@ LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
@@ -2339,14 +2369,15 @@ LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
@@ -2364,16 +2395,17 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
1, 1, 2, 1, 2, 3 });
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
namespace {
@@ -3044,20 +3076,20 @@ void Concatenate(
}
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concatenation1dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);
auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
- armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);
LayerTestResult<T, 1> result(outputTensorInfo);
@@ -3083,10 +3115,10 @@ LayerTestResult<float, 1> Concatenation1dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
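
Concatenation1dTestImpl above builds its inputs with QuantizedVector<T>, which turns float reference values into the element type under test. A self-contained sketch of what such a helper does, assuming standard asymmetric quantisation for integral T (the real helper lives in the shared test utilities and may differ in detail):

    #include <cmath>
    #include <cstdint>
    #include <initializer_list>
    #include <type_traits>
    #include <vector>

    template<typename T>
    std::vector<T> QuantizedVector(float qScale, int32_t qOffset,
                                   std::initializer_list<float> values)
    {
        std::vector<T> result;
        result.reserve(values.size());
        for (float v : values)
        {
            if (std::is_floating_point<T>::value)
            {
                // Float path: scale and offset are unused.
                result.push_back(static_cast<T>(v));
            }
            else
            {
                // Affine quantisation: q = round(v / scale) + offset.
                result.push_back(static_cast<T>(std::lround(v / qScale) + qOffset));
            }
        }
        return result;
    }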
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -3095,7 +3127,7 @@ LayerTestResult<T, 2> Concatenation2dTestImpl(
const float qScale,
const int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
@@ -3137,17 +3169,18 @@ LayerTestResult<T, 2> Concatenation2dTestImpl(
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
+
+ LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
- LayerTestResult<T, 2> result =
- Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -3175,20 +3208,21 @@ LayerTestResult<float, 2> Concatenation2dDim0Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
+
+ LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
- LayerTestResult<T, 2> result =
- Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
@@ -3204,17 +3238,17 @@ LayerTestResult<float, 2> Concatenation2dDim1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -3223,7 +3257,7 @@ LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
10.0f, 11.0f, 12.0f,
}));
- armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
4.0f, 5.0f, 6.0f,
@@ -3235,13 +3269,13 @@ LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
7.0f, 8.0f, 9.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 1
16.0f, 17.0f, 18.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
LayerTestResult<T, 2> result(outputTensorInfo);
std::vector<T> output;
@@ -3282,17 +3316,18 @@ LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
@@ -3301,7 +3336,7 @@ LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
10.0f, 11.0f, 12.0f,
}));
- armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
@@ -3310,7 +3345,7 @@ LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
9.0f,
@@ -3319,7 +3354,7 @@ LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
18.0f
}));
- armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
LayerTestResult<T, 2> result(outputTensorInfo);
std::vector<T> output;
@@ -3348,10 +3383,11 @@ LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -3361,7 +3397,7 @@ LayerTestResult<T, 3> Concatenation3dTestImpl(
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
@@ -3439,17 +3475,18 @@ LayerTestResult<T, 3> Concatenation3dTestImpl(
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
+
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -3513,20 +3550,20 @@ LayerTestResult<float, 3> Concatenation3dDim0Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
@@ -3591,10 +3628,10 @@ LayerTestResult<float, 3> Concatenation3dDim1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -3602,10 +3639,10 @@ LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
@@ -3635,17 +3672,18 @@ LayerTestResult<float, 3> Concatenation3dDim2Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -3666,7 +3704,7 @@ LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f, 8.0f,
@@ -3678,7 +3716,7 @@ LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
11.0f, 12.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
25.0f, 26.0f,
@@ -3708,7 +3746,7 @@ LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
35.0f, 36.0f
}));
- armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
@@ -3785,17 +3823,18 @@ LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -3816,7 +3855,7 @@ LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f, 8.0f,
@@ -3843,7 +3882,7 @@ LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
15.0f, 16.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
17.0f, 18.0f,
@@ -3852,7 +3891,7 @@ LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
31.0f, 32.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
@@ -3923,10 +3962,11 @@ LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -3934,7 +3974,7 @@ LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
@@ -3955,7 +3995,7 @@ LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f,
@@ -3976,7 +4016,7 @@ LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
29.0f
}));
- armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
13.0f, 14.0f, 50.0f,
@@ -3997,7 +4037,7 @@ LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
35.0f, 36.0f, 55.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
@@ -4039,10 +4079,11 @@ LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -4052,7 +4093,7 @@ LayerTestResult<T, 4> Concatenation4dTestImpl(
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
@@ -4099,17 +4140,18 @@ LayerTestResult<T, 4> Concatenation4dTestImpl(
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0,
- true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
@@ -4139,20 +4181,21 @@ LayerTestResult<float, 4> Concatenation4dDim0Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1,
- true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
@@ -4183,20 +4226,21 @@ LayerTestResult<float, 4> Concatenation4dDim1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2,
- true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
@@ -4227,10 +4271,10 @@ LayerTestResult<float, 4> Concatenation4dDim2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -4238,10 +4282,11 @@ LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
int32_t qOffset,
bool useSubtensor)
{
- armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 3,
- useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f,
@@ -4273,10 +4318,11 @@ LayerTestResult<float, 4> Concatenation4dDim3Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -4284,7 +4330,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
int32_t qOffset)
{
unsigned int dimension = 0;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
@@ -4295,7 +4341,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
@@ -4314,7 +4360,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
}));
- armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
@@ -4360,10 +4406,11 @@ LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -4371,7 +4418,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
int32_t qOffset)
{
unsigned int dimension = 1;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
@@ -4382,7 +4429,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
@@ -4392,7 +4439,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
}));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
@@ -4428,10 +4475,11 @@ LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -4439,7 +4487,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
int32_t qOffset)
{
unsigned int dimension = 2;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
@@ -4450,7 +4498,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
@@ -4464,7 +4512,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
27.0f, 28.0f
}));
- armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
@@ -4507,10 +4555,11 @@ LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -4519,7 +4568,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
bool useSubtensor)
{
unsigned int dimension = 3;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
@@ -4530,7 +4579,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f, 13.0f,
@@ -4543,7 +4592,7 @@ LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
26.0f, 27.0f, 28.0f
}));
- armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
@@ -4576,7 +4625,8 @@ LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDiffShapeDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
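
The ResizeBilinear tests that follow show the same migration applied to armnnUtils::GetTensorInfo: the data type moves from a template parameter to a trailing runtime argument. A sketch consistent with the call sites in this patch (the body is an assumption; the layout only decides where the channel dimension sits):

    #include <armnn/Exceptions.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
                                    unsigned int numberOfChannels,
                                    unsigned int height,
                                    unsigned int width,
                                    const armnn::DataLayout dataLayout,
                                    const armnn::DataType dataType)
    {
        switch (dataLayout)
        {
            case armnn::DataLayout::NCHW:
                return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width},
                                         dataType);
            case armnn::DataLayout::NHWC:
                return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels},
                                         dataType);
            default:
                throw armnn::InvalidArgumentException("Unknown data layout");
        }
    }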
LayerTestResult<float, 4> ResizeBilinearNopTest(
@@ -4584,8 +4634,11 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
@@ -4638,8 +4691,11 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 255.0f,
@@ -4704,8 +4760,11 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
@@ -4770,8 +4829,11 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
@@ -4834,8 +4896,11 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f,
@@ -5021,7 +5086,7 @@ float CalcInvL2Norm(std::initializer_list<float> elements)
} // anonymous namespace
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Pad2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -5031,8 +5096,8 @@ LayerTestResult<T, 2> Pad2dTestCommon(
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale, qOffset,
@@ -5089,7 +5154,7 @@ LayerTestResult<T, 2> Pad2dTestCommon(
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Pad3dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -5099,8 +5164,8 @@ LayerTestResult<T, 3> Pad3dTestCommon(
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
@@ -5173,7 +5238,7 @@ LayerTestResult<T, 3> Pad3dTestCommon(
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Pad4dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -5183,8 +5248,8 @@ LayerTestResult<T, 4> Pad4dTestCommon(
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
@@ -5414,42 +5479,42 @@ LayerTestResult<uint8_t, 2> PadUint82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> PadUint84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> PadFloat322dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 3> PadFloat323dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 4> PadFloat324dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 4> L2Normalization1dTest(
@@ -5777,7 +5842,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(
inputValues, expectedOutputValues, layout);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -5794,11 +5859,9 @@ LayerTestResult<T, 4> ConstantTestImpl(
constexpr unsigned int outputChannels = inputChannels;
constexpr unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -5876,14 +5939,14 @@ LayerTestResult<float, 4> ConstantTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantTestUint8(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 3> MergerUint8Test(
@@ -6934,9 +6997,10 @@ LayerTestResult<float, 4> BatchNormTest(
2.f, 4.f
};
- return BatchNormTestImpl<float>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 0.f, 0, armnn::DataLayout::NCHW);
+ return BatchNormTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 0.f, 0, armnn::DataLayout::NCHW);
}
LayerTestResult<float, 4> BatchNormNhwcTest(
@@ -6978,9 +7042,10 @@ LayerTestResult<float, 4> BatchNormNhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<float>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 0.f, 0, armnn::DataLayout::NHWC);
+ return BatchNormTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 0.f, 0, armnn::DataLayout::NHWC);
}
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
@@ -7018,9 +7083,10 @@ LayerTestResult<uint8_t, 4> BatchNormUint8Test(
2.f, 4.f
};
- return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 1.f/20.f, 50, armnn::DataLayout::NCHW);
+ return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NCHW);
}
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
@@ -7062,65 +7128,68 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 1.f/20.f, 50, armnn::DataLayout::NHWC);
+ return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NHWC);
}
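
For reference, the Uint8 batch norm tests above pass scale 1/20 and offset 50; under the usual asymmetric quantisation convention, real = scale * (q - offset), so the expected output value 2.f is stored as the uint8 value 90. A minimal round-trip sketch of that convention (illustrative, not armnn's quantiser):

    #include <cmath>
    #include <cstdint>

    float Dequantize(uint8_t q, float scale, int32_t offset)
    {
        // With scale = 1/20 and offset = 50: (90 - 50) / 20 = 2.0f.
        return scale * (static_cast<int32_t>(q) - offset);
    }

    uint8_t Quantize(float value, float scale, int32_t offset)
    {
        // 2.0f -> round(2.0 * 20) + 50 = 90.
        return static_cast<uint8_t>(std::lround(value / scale) + offset);
    }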
LayerTestResult<uint8_t, 4> ConstantUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
}
LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
@@ -7128,21 +7197,23 @@ LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
@@ -7150,56 +7221,61 @@ LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
- return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
@@ -7207,7 +7283,8 @@ LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
@@ -7215,7 +7292,8 @@ LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, forceNoPadding);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
@@ -7223,7 +7301,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}
@@ -7232,7 +7310,8 @@ LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, forceNoPadding);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
@@ -7240,7 +7319,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}
@@ -7249,7 +7328,7 @@ LayerTestResult<float, 4> SimpleMaxPooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
@@ -7257,7 +7336,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
@@ -7265,7 +7344,7 @@ LayerTestResult<float, 4> SimpleAveragePooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
@@ -7273,7 +7352,7 @@ LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<uint8_t>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, dataLayout, 0.5, -1);
}
@@ -7282,7 +7361,7 @@ LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
+ return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, forceNoPadding);
}
@@ -7290,14 +7369,15 @@ LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5, -1);
}
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
@@ -7305,7 +7385,7 @@ LayerTestResult<float, 4> SimpleL2Pooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
@@ -7313,91 +7393,91 @@ LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize7Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize9Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> ComparePooling2dTest(
@@ -7406,7 +7486,7 @@ LayerTestResult<float, 4> ComparePooling2dTest(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<float>(
+ return ComparePooling2dTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
@@ -7416,7 +7496,7 @@ LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<uint8_t>(
+ return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}
@@ -7425,105 +7505,111 @@ LayerTestResult<float, 2> FullyConnectedLargeTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool transposeWeights)
{
- return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
+ return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -5);
}
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -5);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SimplePermuteFloat32Test(
@@ -7800,8 +7886,8 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
// 4, 5, 6
// 7, 8, 9
- armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
- armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
+ armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
+ armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
{1, 2, 3,
@@ -7846,8 +7932,8 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
// 12, 16
// 24, 28
- armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
- armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
+ armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
+ armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
{12, 16,
@@ -7898,112 +7984,112 @@ LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
namespace {
@@ -8263,126 +8349,126 @@ LayerTestResult<float, 4> StridedSlice4DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> StridedSlice3DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSlice2DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
armnn::IWorkloadFactory& workloadFactory,
@@ -8517,56 +8603,56 @@ LayerTestResult<float, 4> Debug4DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4DTest<float>(workloadFactory, memoryManager);
+ return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> Debug3DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3DTest<float>(workloadFactory, memoryManager);
+ return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> Debug2DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2DTest<float>(workloadFactory, memoryManager);
+ return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 1> Debug1DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1DTest<float>(workloadFactory, memoryManager);
+ return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> Debug4DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Debug3DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Debug2DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Debug1DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> PreCompiledConvolution2dTest(
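[Editor's aside] Every call-site change in the file above follows one rule: the test templates are no longer instantiated with a C++ element type (`float`, `uint8_t`) but with an `armnn::DataType` enumerator, and the element type is recovered from the enumerator. A minimal self-contained sketch of that enum-to-type mapping, under the assumption (suggested by the TypeUtils.hpp include added further down) that armnn provides a trait of roughly this shape; the names below live in a sketch namespace precisely because they are illustrative, not armnn's literal implementation:

#include <cstdint>
#include <type_traits>

namespace armnn_sketch
{
    enum class DataType { Float32, QuantisedAsymm8 };

    // Primary template left undefined: an unsupported enumerator fails to compile.
    template<DataType DT> struct ResolveTypeImpl;

    template<> struct ResolveTypeImpl<DataType::Float32>         { using Type = float;   };
    template<> struct ResolveTypeImpl<DataType::QuantisedAsymm8> { using Type = uint8_t; };

    // Inverse of the removed GetDataType<T>(): enumerator in, element type out.
    template<DataType DT>
    using ResolveType = typename ResolveTypeImpl<DT>::Type;
}

static_assert(std::is_same<armnn_sketch::ResolveType<armnn_sketch::DataType::QuantisedAsymm8>,
                           uint8_t>::value, "enum maps back to element type");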
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 744470db49..7e955653ca 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -121,6 +121,18 @@ LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
bool biasEnabled,
const armnn::DataLayout layout);
+LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout);
+
+LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout);
+
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/backendsCommon/test/LstmTestImpl.hpp b/src/backends/backendsCommon/test/LstmTestImpl.hpp
index 56f40aba84..e300a529ce 100644
--- a/src/backends/backendsCommon/test/LstmTestImpl.hpp
+++ b/src/backends/backendsCommon/test/LstmTestImpl.hpp
@@ -29,15 +29,15 @@ LayerTestResult<float, 2> LstmNoCifgNoPeepholeNoProjectionTestImpl(
unsigned numUnits = outputSize;
- armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
LayerTestResult<float, 2> ret(outputTensorInfo);
@@ -91,9 +91,9 @@ LayerTestResult<float, 2> LstmNoCifgNoPeepholeNoProjectionTestImpl(
AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- armnn::TensorInfo tensorInfo4({numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::GetDataType<float>());
+ armnn::TensorInfo tensorInfo4({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::DataType::Float32);
auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
-0.34550029f, 0.04266912f, -0.15680569f,
@@ -232,15 +232,15 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
unsigned int inputSize = 5;
unsigned numUnits = 20;
- armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
// Scratch buffer size without CIFG [batchSize, numUnits * 4]
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
LayerTestResult<float, 2> ret(outputTensorInfo);
@@ -292,11 +292,11 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::GetDataType<float>());
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
auto inputToInputWeights =
MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
@@ -950,15 +950,15 @@ LayerTestResult<float, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
const unsigned int cellSize = outputSize;
// Decide the shape of all input tensors
- armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::DataType::Float32);
unsigned int scratchBufferSize = cifgEnabled ? cellSize * 3 : cellSize * 4;
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
// List of inputs
std::vector<float> inputData;
@@ -974,9 +974,9 @@ LayerTestResult<float, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
// Prepare all the weights in the descriptor for LSTM
armnn::LstmQueueDescriptor data;
- armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::DataType::Float32);
auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
{-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
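[Editor's aside] The TensorInfo edits in this file are all the same mechanical substitution: rather than deducing the data type from a C++ template argument via `GetDataType<float>()`, the enumerator is written directly at the call site, exactly as the hunks above show. A one-line before/after sketch (the shape values are illustrative):

#include <armnn/Tensor.hpp>

// Before (removed API): armnn::TensorInfo info({2, 5}, armnn::GetDataType<float>());
// After: the DataType enumerator is stated explicitly.
armnn::TensorInfo info({2, 5}, armnn::DataType::Float32);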
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
index e0b8233336..2bdfe286c9 100644
--- a/src/backends/backendsCommon/test/MergerTestImpl.hpp
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -4,6 +4,8 @@
//
#pragma once
+#include "TypeUtils.hpp"
+
#include <armnn/INetwork.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
@@ -47,17 +49,18 @@ INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
return net;
}
-template<typename T>
+template<armnn::DataType ArmnnType>
void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
+ using T = ResolveType<ArmnnType>;
unsigned int concatAxis = 0;
const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
const TensorShape& outputShape = { 4, 3, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
@@ -110,17 +113,18 @@ void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType>
void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
+ using T = ResolveType<ArmnnType>;
unsigned int concatAxis = 1;
const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
const TensorShape& outputShape = { 2, 6, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
@@ -173,17 +177,18 @@ void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType>
void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
+ using T = ResolveType<ArmnnType>;
unsigned int concatAxis = 2;
const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
const TensorShape& outputShape = { 2, 3, 4, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
@@ -236,7 +241,7 @@ void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
@@ -246,7 +251,7 @@ void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
const TensorShape& outputShape = { 2, 3, 2, 4 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
index b542938585..5edf9c802f 100644
--- a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -27,7 +27,7 @@
#include <algorithm>
#include <string>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -53,10 +53,11 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
- inputWidth, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
- outputWidth, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
+ inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
+
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
+ outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
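// [Editor's aside] The armnnUtils::GetTensorInfo change in the hunk above moves
// the data type from a template argument to a trailing runtime parameter, so the
// helper no longer needs to be a template over the element type. A minimal sketch
// of the two shapes of the call, assuming TensorUtils.hpp is on the include path:

#include <armnn/Types.hpp>   // DataLayout, DataType
#include "TensorUtils.hpp"   // armnnUtils::GetTensorInfo (path assumed)

armnn::TensorInfo MakeNchwFloatInfo()
{
    // Before (removed): armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, layout);
    // After: the data type is an ordinary trailing argument.
    return armnnUtils::GetTensorInfo(1, 2, 4, 4,
                                     armnn::DataLayout::NCHW,
                                     armnn::DataType::Float32);
}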
@@ -117,7 +118,7 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
// channels: 2
// batch size: 2
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -148,8 +149,8 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
unsigned int channels = 2;
unsigned int batchSize = 2;
- armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -236,11 +237,11 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
}));
}
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -255,8 +256,8 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -305,11 +306,11 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -324,8 +325,8 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -374,11 +375,11 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -395,8 +396,8 @@ LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
descriptor.m_PadBottom = 50;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -425,11 +426,11 @@ LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -444,8 +445,8 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
std::vector<T> inputData(
QuantizedVector<T>(qScale, qOffset, {
@@ -485,11 +486,11 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -502,7 +503,7 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
descriptor.m_StrideX = descriptor.m_StrideY = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 2.0f,
@@ -511,18 +512,18 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
2.0f, 1.0f, 5.0f, 2.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f, 3.0f,
3.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -535,7 +536,7 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
descriptor.m_StrideX = descriptor.m_StrideY = 3;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
@@ -549,7 +550,7 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f, 3.0f, 3.0f,
@@ -557,11 +558,11 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
3.0f, 3.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -574,7 +575,7 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
descriptor.m_StrideX = descriptor.m_StrideY = 4;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
@@ -586,18 +587,18 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f, 3.0f,
3.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -610,7 +611,7 @@ LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
descriptor.m_StrideX = descriptor.m_StrideY = 7;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
@@ -622,17 +623,17 @@ LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -645,7 +646,7 @@ LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
descriptor.m_StrideX = descriptor.m_StrideY = 9;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
@@ -659,25 +660,25 @@ LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale = 1.0f,
int32_t qOffset = 0)
{
- armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
armnn::Pooling2dDescriptor descriptor;
descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
@@ -704,11 +705,11 @@ LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
0.0f, 3.0f, 0.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -737,8 +738,8 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -820,7 +821,7 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
// channels: 1
// batch size: 1
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -870,10 +871,10 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
618.0f, 582.0f
};
- armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
// Scale and offset should match input - we're just calculating maximum values.
- armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -890,7 +891,7 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
@@ -903,7 +904,7 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
// channels: 1
// batch size: 1
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -948,10 +949,10 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
10.5f,
};
- armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
// Scale and offset should match input - we're just calculating average values.
- armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -968,12 +969,12 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -990,8 +991,8 @@ LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1017,11 +1018,11 @@ LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1.0f, 2.0f, -4.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1038,8 +1039,8 @@ LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1066,11 +1067,11 @@ LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
2.0f, 2.0f, 2.0f, -3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1087,8 +1088,8 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1114,11 +1115,11 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
3.0f, 13.0f, 10.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1136,8 +1137,8 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1162,11 +1163,11 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
2.0f, 3.5f
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1183,8 +1184,8 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1211,11 +1212,11 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
9.0f, 11.0f, 12.0f, 7.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1232,8 +1233,8 @@ LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1259,11 +1260,11 @@ LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
8.0f, 1.4142f, 4.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1280,8 +1281,8 @@ LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -1308,6 +1309,6 @@ LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1.0540f, 1.7638f, 2.5385f, 2.3570f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
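Every hunk in Pooling2dTestImpl.hpp above is the same mechanical rewrite: the armnn::DataType enum value becomes the primary template parameter, the element type T is derived from it, and TensorInfo takes the enum directly instead of recovering it through the removed armnn::GetDataType<T>(). A minimal before/after sketch of the pattern, assuming the includes these test headers already pull in (ExamplePooling2dTestCommon is a hypothetical name, not a function in this patch):

    // Before: T is primary; the DataType had to be recovered at each
    // use site via the now-removed armnn::GetDataType<T>().
    template<typename T>
    LayerTestResult<T, 4> ExamplePooling2dTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

    // After: the armnn::DataType enum value is primary and T is computed
    // from it at compile time, so one explicit argument, e.g.
    // ExamplePooling2dTestCommon<armnn::DataType::Float32>, pins down both.
    template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    LayerTestResult<T, 4> ExamplePooling2dTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);

This is also why every SimplePooling2dTestImpl<T> call above becomes SimplePooling2dTestImpl<ArmnnType>: forwarding T alone would leave the callee's enum parameter undeduced.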
diff --git a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
index 97199e3c53..25ceda1128 100644
--- a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
@@ -19,7 +19,7 @@
#include <algorithm>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -32,13 +32,13 @@ LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
unsigned int inputShape[] = { 2, 4 };
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
float qScale = 1.f / 256.f;
int qOffset = 0;
inputTensorInfo.SetQuantizationScale(qScale);
inputTensorInfo.SetQuantizationOffset(qOffset);
- outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+ outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
outputTensorInfo.SetQuantizationScale(qScale);
outputTensorInfo.SetQuantizationOffset(qOffset);
@@ -87,7 +87,7 @@ LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
return ret;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -103,8 +103,8 @@ LayerTestResult<T, 2> CompareSoftmaxTestImpl(
unsigned int inputShape[] = { batchSize, channels };
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
float qScale = 1.f / 256.f;
int qOffset = 0;
inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp
index 814607ddff..756a51cad3 100644
--- a/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SpaceToBatchNdTestImpl.hpp
@@ -79,7 +79,7 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -96,8 +96,8 @@ LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -113,7 +113,7 @@ LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -130,8 +130,8 @@ LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsTest(
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -152,7 +152,7 @@ LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiBlockTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -169,8 +169,8 @@ LayerTestResult<T, 4> SpaceToBatchNdMultiBlockTest(
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -192,7 +192,7 @@ LayerTestResult<T, 4> SpaceToBatchNdMultiBlockTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -209,8 +209,8 @@ LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {2, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -236,34 +236,34 @@ LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdSimpleNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdSimpleTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdMultiChannelsTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiBlockNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdMultiBlockTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdPaddingNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdPaddingTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
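With the NHWC wrappers forwarding <ArmnnType> rather than <T>, a caller selects the whole instantiation with a single enum argument. A hedged usage sketch (factory and memoryManager stand for whatever the enclosing test fixture provides; they are not names from this patch):

    // T resolves to float under the hood.
    auto f32 = SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(factory, memoryManager);

    // T resolves to uint8_t; the call site now states the quantised
    // type explicitly instead of implying it via uint8_t.
    auto u8 = SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(factory, memoryManager);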
diff --git a/src/backends/backendsCommon/test/SplitterTestImpl.hpp b/src/backends/backendsCommon/test/SplitterTestImpl.hpp
index e88356ce21..004060f0b8 100644
--- a/src/backends/backendsCommon/test/SplitterTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterTestImpl.hpp
@@ -16,7 +16,7 @@
#include <test/TensorHelpers.hpp>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -46,15 +46,15 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
// Define the tensor descriptors.
- armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType);
// Outputs of the original split.
- armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType);
// Outputs of the subsequent subtensor split.
- armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
// The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
@@ -245,13 +245,13 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale, int32_t qOffset)
{
- const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
+ const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType);
auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
{
1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
diff --git a/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp b/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp
index 1633151108..1bf5c642ad 100644
--- a/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceTestImpl.hpp
@@ -4,6 +4,7 @@
//
#pragma once
+#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"
#include <armnn/ArmNN.hpp>
@@ -71,7 +72,7 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSlice4DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -87,8 +88,8 @@ LayerTestResult<T, 4> StridedSlice4DTest(
desc.m_Parameters.m_End = {2, 2, 3, 1};
desc.m_Parameters.m_Stride = {1, 1, 1, 1};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -108,7 +109,7 @@ LayerTestResult<T, 4> StridedSlice4DTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSlice4DReverseTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -124,8 +125,8 @@ LayerTestResult<T, 4> StridedSlice4DReverseTest(
desc.m_Parameters.m_End = {2, -3, 3, 1};
desc.m_Parameters.m_Stride = {1, -1, 1, 1};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -145,7 +146,7 @@ LayerTestResult<T, 4> StridedSlice4DReverseTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSliceSimpleStrideTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -161,8 +162,8 @@ LayerTestResult<T, 4> StridedSliceSimpleStrideTest(
desc.m_Parameters.m_End = {3, 2, 3, 1};
desc.m_Parameters.m_Stride = {2, 2, 2, 1};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -184,7 +185,7 @@ LayerTestResult<T, 4> StridedSliceSimpleStrideTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSliceSimpleRangeMaskTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -202,8 +203,8 @@ LayerTestResult<T, 4> StridedSliceSimpleRangeMaskTest(
desc.m_Parameters.m_BeginMask = (1 << 4) - 1;
desc.m_Parameters.m_EndMask = (1 << 4) - 1;
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -227,7 +228,7 @@ LayerTestResult<T, 4> StridedSliceSimpleRangeMaskTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> StridedSliceShrinkAxisMaskTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -245,8 +246,8 @@ LayerTestResult<T, 2> StridedSliceShrinkAxisMaskTest(
desc.m_Parameters.m_EndMask = (1 << 4) - 1;
desc.m_Parameters.m_ShrinkAxisMask = (1 << 1) | (1 << 2);
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -266,7 +267,7 @@ LayerTestResult<T, 2> StridedSliceShrinkAxisMaskTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> StridedSlice3DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -283,8 +284,8 @@ LayerTestResult<T, 3> StridedSlice3DTest(
desc.m_Parameters.m_Stride = {2, 2, 2};
desc.m_Parameters.m_EndMask = (1 << 3) - 1;
- inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -306,7 +307,7 @@ LayerTestResult<T, 3> StridedSlice3DTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> StridedSlice3DReverseTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -322,8 +323,8 @@ LayerTestResult<T, 3> StridedSlice3DReverseTest(
desc.m_Parameters.m_End = {-4, -4, -4};
desc.m_Parameters.m_Stride = {-2, -2, -2};
- inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -345,7 +346,7 @@ LayerTestResult<T, 3> StridedSlice3DReverseTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> StridedSlice2DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -362,8 +363,8 @@ LayerTestResult<T, 2> StridedSlice2DTest(
desc.m_Parameters.m_Stride = {2, 2};
desc.m_Parameters.m_EndMask = (1 << 2) - 1;
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
@@ -385,7 +386,7 @@ LayerTestResult<T, 2> StridedSlice2DTest(
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> StridedSlice2DReverseTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -403,8 +404,8 @@ LayerTestResult<T, 2> StridedSlice2DReverseTest(
desc.m_Parameters.m_BeginMask = (1 << 2) - 1;
desc.m_Parameters.m_EndMask = (1 << 2) - 1;
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
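All of the ResolveType<ArmnnType> defaults above rely on a compile-time mapping from the DataType enum to a storage type. The sketch below is a plausible minimal reconstruction of such a trait, not the source this patch ships; only the two mappings these tests exercise are shown:

    #include <armnn/Types.hpp>   // armnn::DataType
    #include <cstdint>

    template<armnn::DataType DT>
    struct ResolveTypeImpl;      // primary template deliberately undefined

    template<>
    struct ResolveTypeImpl<armnn::DataType::Float32> { using Type = float; };

    template<>
    struct ResolveTypeImpl<armnn::DataType::QuantisedAsymm8> { using Type = uint8_t; };

    template<armnn::DataType DT>
    using ResolveType = typename ResolveTypeImpl<DT>::Type;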
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index bf299dc0b5..ba94353049 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -19,32 +19,32 @@ BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
{
- MergerDim0EndToEnd<float>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<uint8_t>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
{
- MergerDim1EndToEnd<float>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<uint8_t>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
{
- MergerDim3EndToEnd<float>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<uint8_t>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_SUITE_END()
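These end-to-end call sites show why the old uint8_t template argument was the weak point: a storage type is not unique to a DataType, so the inverse mapping GetDataType<T> cannot stay well-defined as more quantised and boolean types arrive, whereas the forward mapping ResolveType<ArmnnType> always is. A hedged illustration (that Boolean also resolves to uint8_t is an assumption about this version of the trait):

    #include <type_traits>

    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, uint8_t>::value,
                  "QuantisedAsymm8 tensors are stored as uint8_t");

    // Assumed for illustration: if Boolean is also stored as uint8_t,
    // then uint8_t -> DataType is ambiguous and the enum must lead.
    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::Boolean>, uint8_t>::value,
                  "Boolean tensors share the uint8_t storage type");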
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 96f2f1fe77..2fe03abb3c 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -414,17 +414,17 @@ ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseUint8, StridedSlice2DReverseUint8Test)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest,
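CompareDepthwiseConvolution2dFloatTest and CompareDepthwiseConvolution2dUint8Test are new non-template names for the two instantiations, which keeps the ARMNN_COMPARE_REF_AUTO_TEST_CASE arguments free of template angle brackets. Their bodies do not appear on this page, so the following is only a plausible shape, with the parameter list assumed from the surrounding helpers:

    LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        armnn::IWorkloadFactory& refWorkloadFactory,
        const armnn::DataLayout layout)
    {
        // Forward to the templated helper with the enum spelled out.
        return CompareDepthwiseConvolution2dTest<armnn::DataType::Float32>(
            workloadFactory, memoryManager, refWorkloadFactory, layout);
    }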
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 6f44cc4772..1eeb9ed98f 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -54,9 +54,9 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
int32_t qOffset = 0;
float qScale = 0.f;
- TensorInfo inputTensorInfo({num, channels, height, width}, GetDataType<float>());
- TensorInfo outputTensorInfo({num, channels, height, width}, GetDataType<float>());
- TensorInfo tensorInfo({channels}, GetDataType<float>());
+ TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32);
+ TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
+ TensorInfo tensorInfo({channels}, DataType::Float32);
// Set quantization parameters if the requested type is a quantized type.
if(IsQuantizedType<float>())
@@ -143,4 +143,4 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
BOOST_AUTO_TEST_SUITE_END()
-#endif //aarch64 or x86_64
\ No newline at end of file
+#endif //aarch64 or x86_64
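Where a test is pinned to one element type, the enum is simply written out, as in the three TensorInfo constructions above. A minimal sketch of that constructor use (shape values are placeholders, not the test's dimensions):

    using namespace armnn;

    TensorInfo inputInfo({ 1, 3, 4, 4 }, DataType::Float32);  // N, C, H, W plus an explicit type
    TensorInfo gammaInfo({ 3 }, DataType::Float32);           // per-channel parameter tensor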
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 3ca415a1d1..665791a36a 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -53,32 +53,32 @@ BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
{
- MergerDim0EndToEnd<float>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<uint8_t>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
{
- MergerDim1EndToEnd<float>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<uint8_t>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
{
- MergerDim3EndToEnd<float>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<uint8_t>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 09b47e5b3e..3d34934d06 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -440,17 +440,17 @@ ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhw
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest,
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index a9b3193692..d2d4460341 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -59,10 +59,10 @@ BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
unsigned int outputBatchSize = inputBatchSize;
armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<float>());
+ armnn::DataType::Float32);
armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<float>());
+ armnn::DataType::Float32);
LayerTestResult<float, 4> result(inputTensorInfo);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 9a4e60162f..4f4a161509 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -317,7 +317,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
const std::vector<float > expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- ArithmeticSimpleEndToEnd<float>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
@@ -325,7 +325,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
const std::vector<float> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ArithmeticSimpleEndToEnd<float>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
@@ -333,7 +333,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- ArithmeticSimpleEndToEnd<uint8_t>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
@@ -341,7 +341,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ArithmeticSimpleEndToEnd<uint8_t>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
@@ -349,7 +349,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
const std::vector<float > expectedOutput({ 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0 });
- ArithmeticBroadcastEndToEnd<float>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
@@ -357,7 +357,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
const std::vector<float> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ArithmeticBroadcastEndToEnd<float>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
@@ -365,7 +365,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0 });
- ArithmeticBroadcastEndToEnd<uint8_t>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
@@ -373,47 +373,47 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ArithmeticBroadcastEndToEnd<uint8_t>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
- MergerDim0EndToEnd<float>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<uint8_t>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
{
- MergerDim1EndToEnd<float>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<uint8_t>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
{
- MergerDim2EndToEnd<float>(defaultBackends);
+ MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
{
- MergerDim2EndToEnd<uint8_t>(defaultBackends);
+ MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
{
- MergerDim3EndToEnd<float>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<uint8_t>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+BOOST_AUTO_TEST_SUITE_END()
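One side effect visible in these Ref tests: because the helper's T is defaulted from ArmnnType, the expectedOutput vector's element type is now checked against the enum at compile time. A hedged sketch of that interaction, with the helper's signature inferred from the call sites above:

    const std::vector<float> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0 });

    // OK: T = ResolveType<Float32> = float, matching std::vector<float>.
    ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);

    // Would no longer compile: T = uint8_t, so the parameter type is
    // std::vector<uint8_t> and std::vector<float> does not convert.
    // ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);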