author     Keith Davis <keith.davis@arm.com>  2020-08-31 08:32:55 +0100
committer  KeithARM <keith.davis@arm.com>     2020-08-31 13:37:55 +0000
commit     f500d6c22c7799dcc6b057d560fb88947ac63e6a (patch)
tree       b81fcc7e1c35120916119a114a040cce3108ab45 /src
parent     7faf9a88637afae76373a439d7eca3507b77c98c (diff)
download   armnn-f500d6c22c7799dcc6b057d560fb88947ac63e6a.tar.gz
IVGCVSW-5249 Use CreateTensorHandle from ITensorHandleFactory in the test for all layers between C-D
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I9583adf50e67e63e73833f400d1c50fbff57f60c
Diffstat (limited to 'src')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp        | 233
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp        | 162
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp            | 383
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp            | 119
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp          |  32
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp          |  15
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp            | 266
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp            |  47
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp |   9
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp |   3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp |  11
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp |   3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp |  11
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp |   3
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp |  11
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp |   3
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                                      | 304
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                                  | 284
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp                              | 456
19 files changed, 1418 insertions, 937 deletions
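
Every hunk below applies the same mechanical pattern: each layer-test helper gains a const armnn::ITensorHandleFactory& parameter, which is threaded through to the point where tensor handles are created, replacing the deprecated IWorkloadFactory::CreateTensorHandle overloads. A condensed before/after sketch of that pattern, abridged from the hunks that follow (not a complete test):

    // Before: handles came from the workload factory via deprecated overloads.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // After: the caller supplies an ITensorHandleFactory and handles come from it,
    // so the deprecation guards are no longer needed.
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
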
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 9c83b9d06c..be44234b76 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -26,7 +26,8 @@ template <std::size_t NumDims,
typename InType = armnn::ResolveType<ArmnnInType>>
LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::ComparisonDescriptor& descriptor,
const armnn::TensorShape& shape0,
std::vector<InType> values0,
@@ -56,11 +57,9 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
LayerTestResult<uint8_t, NumDims> ret(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
- std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::ComparisonQueueDescriptor qDescriptor;
qDescriptor.m_Parameters = descriptor;
@@ -95,7 +94,8 @@ template <std::size_t NumDims,
typename InType = armnn::ResolveType<ArmnnInType>>
LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
armnn::IWorkloadFactory & workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::ComparisonDescriptor& descriptor,
const armnn::TensorShape& shape0,
std::vector<InType> values0,
@@ -109,6 +109,7 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
return ComparisonTestImpl<NumDims, ArmnnInType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
descriptor,
shape0,
values0,
@@ -148,6 +149,7 @@ std::vector<uint8_t> GetExpectedOutputData(const TestData& testData, armnn::Comp
template<armnn::DataType ArmnnInType, typename TestData>
LayerTestResult<uint8_t, 4> ComparisonTestImpl(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TestData& testData,
armnn::ComparisonOperation operation,
float quantScale = 1.f,
@@ -161,6 +163,7 @@ LayerTestResult<uint8_t, 4> ComparisonTestImpl(armnn::IWorkloadFactory& workload
return ComparisonTestImpl<4, ArmnnInType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
armnn::ComparisonDescriptor(operation),
testData.m_InputShape0,
inputData0,
@@ -339,198 +342,234 @@ static Broadcast1dVectorTestData s_Broadcast1dVectorTestData;
// Equal
LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Equal);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Equal);
}
// Greater
LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Greater);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Greater);
}
@@ -538,198 +577,234 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorUint8Test(
// GreaterOrEqual
LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::GreaterOrEqual);
}
// Less
LayerTestResult<uint8_t, 4> LessSimpleTest(armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::Less);
}
LayerTestResult<uint8_t, 4> LessBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::Less);
}
@@ -737,99 +812,117 @@ LayerTestResult<uint8_t, 4> LessBroadcast1dVectorUint8Test(
// LessOrEqual
LayerTestResult<uint8_t, 4> LessOrEqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::LessOrEqual);
}
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::LessOrEqual);
}
@@ -837,99 +930,117 @@ LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorUint8Test(
// NotEqual
LayerTestResult<uint8_t, 4> NotEqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::Float16>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_SimpleTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1ElementTestData,
armnn::ComparisonOperation::NotEqual);
}
LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
s_Broadcast1dVectorTestData,
armnn::ComparisonOperation::NotEqual);
}
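
For illustration only, a hedged sketch of how one of the updated entry points might now be invoked from a backend test. The Get* helpers below are hypothetical placeholders, not part of this patch; each backend test file (ClLayerTests.cpp, NeonLayerTests.cpp, RefLayerTests.cpp) constructs its own concrete ITensorHandleFactory and passes it as the new third argument.

    // Hypothetical caller sketch; the helper names are placeholders, not real Arm NN APIs.
    armnn::IWorkloadFactory& workloadFactory = GetWorkloadFactory();                        // placeholder
    armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager = GetMemoryManager();    // placeholder
    const armnn::ITensorHandleFactory& tensorHandleFactory = GetTensorHandleFactory();      // placeholder

    // The extra tensorHandleFactory argument is now required by every comparison test.
    LayerTestResult<uint8_t, 4> result =
        EqualSimpleTest(workloadFactory, memoryManager, tensorHandleFactory);
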
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp
index d53c62305a..301241785b 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.hpp
@@ -13,221 +13,275 @@
// Equal
LayerTestResult<uint8_t, 4> EqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
// Greater
LayerTestResult<uint8_t, 4> GreaterSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
// GreaterOrEqual
LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
// Less
LayerTestResult<uint8_t, 4> LessSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
// LessOrEqual
LayerTestResult<uint8_t, 4> LessOrEqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
// NotEqual
LayerTestResult<uint8_t, 4> NotEqualSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualSimpleFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 7f047cd323..d486bc0c19 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -121,6 +121,7 @@ void Generate3dPermuteVectorForConcat(
template<typename T> void PermuteTensorData(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const PermutationVector& mappings,
TensorInfo & inputTensorInfo,
const T * inputData,
@@ -137,10 +138,8 @@ template<typename T> void PermuteTensorData(
}
TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
PermuteQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
@@ -172,6 +171,7 @@ template<typename T> void PermuteTensorData(
template<typename T> void PermuteInputsForConcat(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
std::vector<TensorInfo> & inputTensorInfos,
std::vector<T *> & inputData,
std::vector<std::vector<T>> & inputDataStorage,
@@ -215,6 +215,7 @@ template<typename T> void PermuteInputsForConcat(
PermuteTensorData<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
permutations.first,
newTensorInfo,
inputData[nthInput],
@@ -240,6 +241,7 @@ template<typename T> void PermuteInputsForConcat(
template <typename T> void PermuteOutputForConcat(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo & tensorInfo,
const PermutationVector & permuteVector,
std::unique_ptr<ITensorHandle> && inputDataHandle,
@@ -262,6 +264,7 @@ template <typename T> void PermuteOutputForConcat(
PermuteTensorData<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
permuteVector,
resultTensorInfo,
&inputData[0],
@@ -273,6 +276,7 @@ template <typename T> void PermuteOutputForConcat(
template<typename T> void Concatenate(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
std::initializer_list<const TensorInfo> inputTensorInfosOrig,
std::initializer_list<T *> inputsOrig,
const TensorInfo& outputTensorInfoOrig,
@@ -311,6 +315,7 @@ template<typename T> void Concatenate(
//
PermuteInputsForConcat<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputTensorInfos,
inputs,
tmpInputDataStorage,
@@ -323,9 +328,9 @@ template<typename T> void Concatenate(
std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
inputHandles.reserve(inputCount);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
ConcatQueueDescriptor queueDescriptor;
OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
queueDescriptor.m_Parameters = viewsDescriptor;
@@ -338,21 +343,21 @@ template<typename T> void Concatenate(
queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
for (unsigned int i = 0; i < inputCount; ++i)
{
const TensorInfo& inputTensorInfo = inputTensorInfos[i];
- ARMNN_NO_DEPRECATE_WARN_BEGIN
+
std::unique_ptr<ITensorHandle> inputHandle =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle,
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle,
inputTensorInfo.GetShape(),
queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+
inputHandles.emplace_back(std::move(inputHandle));
}
@@ -362,9 +367,7 @@ template<typename T> void Concatenate(
{
for (unsigned int i = 0; i < inputCount; ++i)
{
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfos[i]);
inputHandles.emplace_back(std::move(inputHandle));
}
}
@@ -399,6 +402,7 @@ template<typename T> void Concatenate(
{
PermuteOutputForConcat<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
outputTensorInfo,
permuteVector,
std::move(outputHandle),
@@ -418,6 +422,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concat1dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -433,7 +438,7 @@ LayerTestResult<T, 1> Concat1dTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ inputTensorInfo, inputTensorInfo, inputTensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -455,6 +460,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo& outputTensorInfo,
unsigned int dimension,
const float qScale,
@@ -496,7 +502,7 @@ LayerTestResult<T, 2> Concat2dTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ inputTensorInfo, inputTensorInfo, inputTensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -512,13 +518,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
{
@@ -549,13 +556,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
{
@@ -574,6 +582,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -615,7 +624,7 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -653,6 +662,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -694,7 +704,7 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -720,6 +730,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo& outputTensorInfo,
unsigned int dimension,
bool useSubtensor,
@@ -798,7 +809,7 @@ LayerTestResult<T, 3> Concat3dTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ inputTensorInfo, inputTensorInfo, inputTensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -814,13 +825,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
{
@@ -887,13 +899,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
{
@@ -960,6 +973,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor,
float qScale,
int32_t qOffset)
@@ -967,7 +981,7 @@ LayerTestResult<T, 3> Concat3dDim2TestImpl(
TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
{
@@ -998,6 +1012,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1075,7 +1090,7 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -1149,6 +1164,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1220,7 +1236,7 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -1288,6 +1304,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor,
float qScale,
int32_t qOffset)
@@ -1366,7 +1383,7 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
std::vector<T> output;
output.resize(outputTensorInfo.GetNumElements());
- Concatenate<T>(workloadFactory, memoryManager,
+ Concatenate<T>(workloadFactory, memoryManager, tensorHandleFactory,
{ input0TensorInfo, input1TensorInfo, input2TensorInfo },
{ input0.data(), input1.data(), input2.data() },
outputTensorInfo,
@@ -1404,6 +1421,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dTestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const TensorInfo& outputTensorInfo,
unsigned int dimension,
bool useSubtensor,
@@ -1452,6 +1470,7 @@ LayerTestResult<T, 4> Concat4dTestImpl(
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo, inputTensorInfo, inputTensorInfo},
{input0.data(), input1.data(), input2.data()},
outputTensorInfo,
@@ -1467,13 +1486,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1507,13 +1527,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1547,13 +1568,14 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim2TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 2, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1587,6 +1609,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDim3TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool useSubtensor)
@@ -1594,7 +1617,7 @@ LayerTestResult<T, 4> Concat4dDim3TestImpl(
TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
- workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
+ workloadFactory, memoryManager, tensorHandleFactory, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
{
@@ -1628,6 +1651,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1673,6 +1697,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1713,6 +1738,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1749,6 +1775,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1779,6 +1806,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -1818,6 +1846,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1855,6 +1884,7 @@ template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool useSubtensor)
@@ -1895,6 +1925,7 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
output.resize(outputTensorInfo.GetNumElements());
Concatenate<T>(workloadFactory,
memoryManager,
+ tensorHandleFactory,
{inputTensorInfo0, inputTensorInfo1},
{input0.data(), input1.data()},
outputTensorInfo,
@@ -1921,6 +1952,7 @@ template<DataType ArmnnType, typename T>
LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
IgnoreUnused(memoryManager);
@@ -2009,21 +2041,21 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
- ARMNN_NO_DEPRECATE_WARN_BEGIN
+
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
OriginsDescriptor desc = CreateDescriptorForConcatenation(
inputTensorShapes.begin(),inputTensorShapes.end(), 2);
@@ -2062,12 +2094,14 @@ template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
//
@@ -2076,7 +2110,8 @@ ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
LayerTestResult<float,3> ConcatTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2158,21 +2193,21 @@ LayerTestResult<float,3> ConcatTest(
std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
@@ -2201,162 +2236,194 @@ LayerTestResult<float,3> ConcatTest(
LayerTestResult<float, 1> Concat1dTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ 0.0f,
+ 0);
}
LayerTestResult<float, 3> Concat3dDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim2Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
- return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory,
+ useSubtensor, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
- workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim2Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDim3Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
- return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0, useSubtensor);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager,
+ tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, useSubtensor);
}
LayerTestResult<Half, 3> ConcatFloat16Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<BFloat16, 3> ConcatBFloat16Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2458,21 +2525,21 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
@@ -2501,7 +2568,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
LayerTestResult<uint8_t, 3> ConcatUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2595,21 +2663,20 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
@@ -2639,7 +2706,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
LayerTestResult<uint16_t, 3> ConcatUint16Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -2730,21 +2798,21 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
bool subTensorsSupported = workloadFactory.SupportsSubTensors();
std::unique_ptr<ITensorHandle> inputHandle1 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo1);
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
std::unique_ptr<ITensorHandle> inputHandle2 =
subTensorsSupported ?
- workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
- workloadFactory.CreateTensorHandle(inputTensorInfo2);
- ARMNN_NO_DEPRECATE_WARN_END
+ tensorHandleFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+ tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
+
ConcatQueueDescriptor data;
WorkloadInfo info;
@@ -2774,146 +2842,165 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat3dDim2TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory, bool useSubtensor)
{
return Concat4dDim3TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
IWorkloadFactory& workloadFactory,
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor)
{
return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
- workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, -1, useSubtensor);
}
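
Every hunk in this file applies the same substitution: the Concat test bodies now receive an armnn::ITensorHandleFactory and call its CreateTensorHandle/CreateSubTensorHandle members instead of the deprecated IWorkloadFactory overloads, while the SupportsSubTensors() capability query stays on the workload factory. A minimal sketch of that pattern, pulled out of the test bodies for readability — the free-function form and the helper name are illustrative only, not part of this change, and the ArmNN headers declaring IWorkloadFactory, ITensorHandleFactory, TensorInfo and ITensorHandle are assumed to be included as in the files above:

// Illustrative sketch only: the helper name is a placeholder; the member
// functions are the ones exercised in the hunks above.
#include <memory>

std::unique_ptr<armnn::ITensorHandle> CreateConcatInputHandle(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::ITensorHandle& outputHandle,
    const armnn::TensorInfo& inputTensorInfo,
    const unsigned int* viewOrigin,
    bool useSubtensor)
{
    // Sub-tensor support is still queried on the workload factory; only the
    // handle creation itself moves to the ITensorHandleFactory.
    const bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();

    return subTensorsSupported
        ? tensorHandleFactory.CreateSubTensorHandle(outputHandle,
                                                    inputTensorInfo.GetShape(),
                                                    viewOrigin)
        : tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
}
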
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp
index 167a547542..64e0c0a722 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp
@@ -18,198 +18,245 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<float, 3> ConcatTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<armnn::Half, 3> ConcatFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> ConcatUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint16_t, 3> ConcatUint16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 1> Concat1dTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 2> Concat2dDim0Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 2> Concat2dDim1Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 3> Concat3dDim0Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 3> Concat3dDim1Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 3> Concat3dDim2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<float, 4> Concat4dDim0Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> Concat4dDim1Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> Concat4dDim2Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> Concat4dDim3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<uint8_t, 3> ConcatDifferentInputOutputQParamUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
LayerTestResult<int16_t, 3> ConcatDifferentInputOutputQParamInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool useSubtensor);
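
Every declaration in this header changes the same way: the new const armnn::ITensorHandleFactory& parameter is inserted directly after the memory manager and before any trailing flag such as useSubtensor, so positional call sites only need the one extra argument. Taking Concat3dDim2Test as a representative case (both forms copied from the hunks above):

// Previous signature (removed by this change):
LayerTestResult<float, 3> Concat3dDim2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor);

// Updated signature: the tensor-handle factory sits between the memory
// manager and the trailing useSubtensor flag.
LayerTestResult<float, 3> Concat3dDim2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool useSubtensor);
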
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index d84a9bf8bc..45c94d345b 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -25,6 +25,7 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset)
{
@@ -97,9 +98,9 @@ LayerTestResult<T, 4> ConstantTestImpl(
LayerTestResult<T, 4> result(outputTensorInfo);
result.outputExpected = input;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
@@ -124,35 +125,40 @@ LayerTestResult<T, 4> ConstantTestImpl(
LayerTestResult<float, 4> ConstantTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+ return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}
LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
}
LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
- return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
}
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp
index 932965a947..71aacb5e62 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.hpp
@@ -12,20 +12,25 @@
LayerTestResult<float, 4> ConstantTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
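The prototype changes above show the calling convention this patch introduces: every test entry point now receives an ITensorHandleFactory alongside the workload factory and memory manager, and handle creation inside the test goes through that factory instead of the deprecated IWorkloadFactory overload. A minimal caller-side sketch, assuming a backend test fixture that already owns all three objects (the variable names below are illustrative and not defined in this patch):

    // Hypothetical usage after this change; 'factory', 'memMgr' and
    // 'handleFactory' stand in for whatever the backend test harness provides.
    LayerTestResult<float, 4> result =
        ConstantTest(factory, memMgr, handleFactory);
    // The handle factory, not the workload factory, now creates the
    // ITensorHandle objects used inside the test body.
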
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 407e627a39..e99a26e81e 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -201,6 +201,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const boost::multi_array<T, 4>& originalInput,
const boost::multi_array<T, 4>& originalKernel,
const boost::multi_array<B, 1>& bias,
@@ -312,10 +313,10 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
outputData = tmp;
}
ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
@@ -368,6 +369,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& kernel,
const boost::multi_array<B, 1>& bias,
@@ -418,10 +420,10 @@ LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
LayerTestResult<O, 4> ret(outputTensorInfo);
ret.outputExpected = MakeTensor<O, 4>(outputTensorInfo, outputData);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
@@ -461,6 +463,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T,4> Convolution1dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled)
@@ -539,10 +542,10 @@ LayerTestResult<T,4> Convolution1dTestImpl(
biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
1, outputSize);
}
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo);
@@ -583,6 +586,7 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -623,6 +627,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
boost::multi_array<T, 1>(),
@@ -636,6 +641,7 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -685,6 +691,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
boost::multi_array<T, 1>(),
@@ -704,6 +711,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -777,6 +785,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
@@ -791,6 +800,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -856,6 +866,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
@@ -870,6 +881,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::DataLayout layout,
float qScale,
int32_t qOffset)
@@ -919,6 +931,7 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(false, qScale * qScale),
@@ -937,6 +950,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::DataLayout layout,
float qScale,
int32_t qOffset)
@@ -979,6 +993,7 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(false, qScale * qScale),
@@ -996,6 +1011,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const std::vector<float>& inputNoQuantizedValues,
armnn::TensorInfo& inputTensorInfo,
const std::vector<float>& kernelNoQuantizedValues,
@@ -1064,6 +1080,7 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
@@ -1085,6 +1102,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -1125,6 +1143,7 @@ LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -1141,6 +1160,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -1196,6 +1216,7 @@ LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -1212,6 +1233,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -1256,6 +1278,7 @@ LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -1279,7 +1302,9 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::IWorkloadFactory& refWorkloadFactory)
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
unsigned int inputHeight = 8;
unsigned int inputWidth = 16;
@@ -1319,10 +1344,10 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908);
auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234);
auto bias = MakeRandomTensor<T, 1>(biasDesc, 1028);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::Convolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
@@ -1342,10 +1367,10 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
data.m_Parameters.m_PadTop = padY;
data.m_Parameters.m_PadBottom = padY;
data.m_Parameters.m_BiasEnabled = true;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+
armnn::Convolution2dQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
@@ -1377,6 +1402,7 @@ LayerTestResult<T,4> CompareConvolution2dTestImpl(
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout& dataLayout)
{
@@ -1467,6 +1493,7 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
<armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
boost::multi_array<float, 1>(),
@@ -1485,6 +1512,7 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout& dataLayout)
{
@@ -1575,6 +1603,7 @@ LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
<armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
boost::multi_array<float, 1>(),
@@ -1599,6 +1628,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& kernel,
const boost::multi_array<B, 1>& bias,
@@ -1689,10 +1719,10 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
}
ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
@@ -1736,6 +1766,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -1846,10 +1877,10 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
}
ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
@@ -1889,6 +1920,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -2058,10 +2090,10 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
}
ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
@@ -2102,6 +2134,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const boost::multi_array<T, 4>& originalInput,
const boost::multi_array<T, 4>& originalKernel,
const boost::multi_array<B, 1>& bias,
@@ -2214,10 +2247,10 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
outputData = tmp;
}
ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
-ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
@@ -2265,6 +2298,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled,
@@ -2329,6 +2363,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
@@ -2349,6 +2384,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled)
@@ -2410,6 +2446,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
@@ -2430,6 +2467,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
float qScale,
int32_t qOffset,
bool biasEnabled)
@@ -2485,6 +2523,7 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
@@ -2506,6 +2545,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const std::vector<float>& inputNoQuantizedValues,
armnn::TensorInfo& inputTensorInfo,
const std::vector<float>& kernelNoQuantizedValues,
@@ -2574,6 +2614,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
@@ -2595,6 +2636,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -2635,6 +2677,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -2651,6 +2694,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -2711,6 +2755,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -2727,6 +2772,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -2803,6 +2849,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test(
return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -2819,6 +2866,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
@@ -2872,6 +2920,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test(
return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
inputNoQuantizedValues,
inputTensorInfo,
kernelNoQuantizedValues,
@@ -2889,6 +2938,8 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory,
const armnnUtils::DataLayoutIndexed& layout)
{
unsigned int inputHeight = 8;
@@ -2951,10 +3002,10 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f);
auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(
biasDesc, 1028, 0.0f, 255.0f);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::DepthwiseConvolution2dQueueDescriptor data;
armnn::WorkloadInfo info;
armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
@@ -2975,10 +3026,10 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
data.m_Parameters.m_PadBottom = padY;
data.m_Parameters.m_BiasEnabled = true;
data.m_Parameters.m_DataLayout = layout.GetDataLayout();
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+
armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
armnn::WorkloadInfo refInfo = info;
SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
@@ -3014,6 +3065,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3021,6 +3073,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3028,6 +3081,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3035,6 +3089,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3042,6 +3097,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3049,6 +3105,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3056,6 +3113,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3063,6 +3121,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3070,6 +3129,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3077,6 +3137,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3084,6 +3145,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3091,6 +3153,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3098,6 +3161,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3105,6 +3169,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3112,6 +3177,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3119,6 +3185,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3126,6 +3193,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3133,6 +3201,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3140,6 +3209,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3147,6 +3217,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3154,6 +3225,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3161,6 +3233,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3168,6 +3241,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3175,6 +3249,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3182,6 +3257,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ const armnn::ITensorHandleFactory&,
bool,
armnn::DataLayout);
@@ -3189,6 +3265,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3196,6 +3273,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3203,6 +3281,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3210,6 +3289,7 @@ template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -3220,41 +3300,46 @@ DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled)
{
return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
0.f,
0,
biasEnabled,
@@ -3264,12 +3349,14 @@ LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
0.f,
0,
biasEnabled,
@@ -3279,73 +3366,81 @@ LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
armnn::DataLayout layout)
{
return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, layout, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
}
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
armnn::DataLayout layout)
{
return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, layout, 0.0f, 0);
+ workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
}
LayerTestResult<float, 4> Convolution1dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled)
{
return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
}
LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled)
{
return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
}
LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::DataLayout layout)
{
using namespace armnn;
@@ -3402,10 +3497,10 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
descriptor.m_BiasEnabled = true;
descriptor.m_DataLayout = layout;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+
WorkloadInfo workloadInfo;
ScopedCpuTensorHandle weightTensor(kernelInfo);
@@ -3440,44 +3535,50 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
LayerTestResult<float,4> CompareConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::IWorkloadFactory& refWorkloadFactory)
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
- workloadFactory, memoryManager, refWorkloadFactory);
+ workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
}
LayerTestResult<float, 4> DepthwiseConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
}
LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled)
{
return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
}
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
}
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
@@ -3498,6 +3599,7 @@ LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
input,
kernel,
boost::multi_array<float, 1>(),
@@ -3510,40 +3612,45 @@ LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
- workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
}
LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory,
memoryManager,
+ tensorHandleFactory,
0.f,
0,
false);
@@ -3552,26 +3659,29 @@ LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout)
{
return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
- workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::DataLayout layout)
{
using namespace armnn;
@@ -3643,10 +3753,8 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
descriptor.m_BiasEnabled = true;
descriptor.m_DataLayout = layout;
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
- std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
WorkloadInfo workloadInfo;
ScopedCpuTensorHandle weightTensor(kernelInfo);
@@ -3683,18 +3791,22 @@ LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory,
const armnn::DataLayout layout)
{
return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
- workloadFactory, memoryManager, refWorkloadFactory, layout);
+ workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
}
LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory,
const armnn::DataLayout layout)
{
return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
- workloadFactory, memoryManager, refWorkloadFactory, layout);
+ workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
}
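
Note on the pattern above: every impl in Conv2dTestImpl.cpp gains a trailing const armnn::ITensorHandleFactory& parameter, and only the input/output ITensorHandle creation moves to that factory; descriptors, ScopedCpuTensorHandle weights/biases and workload execution are untouched. A minimal sketch of the new handle wiring follows, assuming the test-utility helpers these impl files already include (AddInputToWorkload/AddOutputToWorkload, <memory>, <utility>); the function name, the choice of Convolution2dQueueDescriptor and its usage here are illustrative assumptions, not code from the patch.

    // Sketch only: shows where the ITensorHandleFactory now enters the picture.
    std::pair<std::unique_ptr<armnn::ITensorHandle>, std::unique_ptr<armnn::ITensorHandle>>
    SketchCreateIoHandles(const armnn::ITensorHandleFactory& tensorHandleFactory,
                          const armnn::TensorInfo& inputTensorInfo,
                          const armnn::TensorInfo& outputTensorInfo,
                          armnn::Convolution2dQueueDescriptor& data,
                          armnn::WorkloadInfo& info)
    {
        // New: both handles come from the backend's tensor-handle factory instead of
        // the deprecated IWorkloadFactory::CreateTensorHandle overloads.
        std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
        std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

        // Unchanged: wiring the handles into the workload stays exactly as before.
        AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
        AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

        return std::make_pair(std::move(inputHandle), std::move(outputHandle));
    }
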
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp
index 21c4cf9bf0..1f54034703 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.hpp
@@ -22,6 +22,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -29,6 +30,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -36,95 +38,113 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled);
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
armnn::DataLayout layout);
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
armnn::DataLayout layout);
LayerTestResult<float, 4> Convolution1dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled);
LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled);
LayerTestResult<float, 4> CompareConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- armnn::IWorkloadFactory& refWorkloadFactory);
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory);
LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::DataLayout layout);
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout& dataLayout);
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout& dataLayout);
@@ -136,6 +156,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -143,6 +164,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -150,6 +172,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -157,6 +180,7 @@ template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = arm
LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
@@ -165,76 +189,93 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory,
const armnn::DataLayout layout);
LayerTestResult<float, 4> DepthwiseConvolution2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled);
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
bool biasEnabled,
const armnn::DataLayout layout);
LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
const armnn::DataLayout layout);
LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory,
const armnn::DataLayout layout);
LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::ITensorHandleFactory& refTensorHandleFactory,
const armnn::DataLayout layout);
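
The Compare* declarations above now carry two factories. The sketch below is a hedged illustration of how they are presumably consumed inside the corresponding impls, not code from the patch: the backend under test creates its handles through tensorHandleFactory, while the reference run creates its handles through refTensorHandleFactory; names, shapes and the omission of the actual workload comparison are assumptions.

    template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    LayerTestResult<T, 4> SketchCompareLayerTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        armnn::IWorkloadFactory& refWorkloadFactory,
        const armnn::ITensorHandleFactory& tensorHandleFactory,
        const armnn::ITensorHandleFactory& refTensorHandleFactory,
        const armnn::DataLayout layout)
    {
        armnn::IgnoreUnused(workloadFactory, refWorkloadFactory, memoryManager, layout);

        const armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 2 }, ArmnnType);
        const armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 2 }, ArmnnType);

        // Backend under test: handles from its own factory.
        std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
        std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

        // Reference run: handles from the reference backend's factory, so neither
        // side goes through the deprecated IWorkloadFactory path any more.
        std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
        std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

        // Building, executing and comparing the two workloads is unchanged by this
        // patch and is omitted from the sketch.
        armnn::IgnoreUnused(inputHandle, outputHandle, inputHandleRef, outputHandleRef);
        return LayerTestResult<T, 4>(outputTensorInfo);
    }
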
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
index e85af5683f..fdc6220d51 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
@@ -12,7 +12,8 @@
LayerTestResult<float, 4> ConvertBf16ToFp32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -33,10 +34,8 @@ LayerTestResult<float, 4> ConvertBf16ToFp32Test(
{ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
armnn::ConvertBf16ToFp32QueueDescriptor data;
armnn::WorkloadInfo info;
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
index 717ec6a121..08f4c04074 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
@@ -14,4 +14,5 @@
LayerTestResult<float, 4> ConvertBf16ToFp32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index fa2341e3a4..8745a5293b 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -15,7 +15,8 @@
LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
using namespace half_float::literal;
@@ -31,10 +32,10 @@ LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
{ -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::ConvertFp16ToFp32QueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp
index f0f1a4bfb5..8eefb77892 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp
@@ -12,4 +12,5 @@
LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
index c721304b84..db832594cd 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
@@ -12,7 +12,8 @@
LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
@@ -53,10 +54,10 @@ LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
LayerTestResult<armnn::BFloat16, 4> ret(outputTensorInfo);
ret.outputExpected = MakeTensor<armnn::BFloat16, 4>(outputTensorInfo, outputValues);
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::ConvertFp32ToBf16QueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
index 4c6125f585..9e1da65c2e 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
@@ -14,4 +14,5 @@
LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 2041470aeb..5fbec56435 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -13,7 +13,8 @@
LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
{
IgnoreUnused(memoryManager);
using namespace half_float::literal;
@@ -29,10 +30,10 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
{ -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
- ARMNN_NO_DEPRECATE_WARN_BEGIN
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
- ARMNN_NO_DEPRECATE_WARN_END
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
armnn::ConvertFp32ToFp16QueueDescriptor data;
armnn::WorkloadInfo info;
AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp
index 263724710c..39dc8a4d4d 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp
@@ -14,4 +14,5 @@
LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
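
Each of the Convert* hunks above shows only the top of its test. For orientation, here is a compact sketch of the full converted shape, under the assumption that everything below the handle creation keeps the standard ArmNN layer-test tail (AddOutputToWorkload, workload creation, Allocate, CopyData*, Execute); the function name, shapes and values are illustrative, not code from the patch.

    LayerTestResult<armnn::Half, 4> SketchConvertFp32ToFp16Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::ITensorHandleFactory& tensorHandleFactory)
    {
        armnn::IgnoreUnused(memoryManager);
        using namespace half_float::literal;

        const armnn::TensorInfo inputTensorInfo ({ 1, 1, 2, 2 }, armnn::DataType::Float32);
        const armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float16);

        auto input = MakeTensor<float, 4>(inputTensorInfo, { -1.5f, -0.5f, 0.5f, 1.5f });

        LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
        ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo, { -1.5_h, -0.5_h, 0.5_h, 1.5_h });

        // The only change this patch makes: the handles come from the
        // ITensorHandleFactory handed in by the *_WITH_THF test macro, not from the
        // deprecated IWorkloadFactory::CreateTensorHandle overloads.
        std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
        std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

        // Assumed-unchanged tail of the test.
        armnn::ConvertFp32ToFp16QueueDescriptor data;
        armnn::WorkloadInfo info;
        AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
        AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

        std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);

        inputHandle->Allocate();
        outputHandle->Allocate();

        CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
        workload->Execute();
        CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

        return ret;
    }
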
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 4b1fb698b3..67ae73e1de 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -108,118 +108,120 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedLarge, FullyConnectedLargeTest, fals
ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
// Convolution
-ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true)
-
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dNhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dStride2x2Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution1d, Convolution1dTest, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dNhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dStride2x2Nhwc,
SimpleConvolution2d3x3Stride2x2Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
+ Convolution2dAsymmetricPaddingTest,
+ DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
Convolution2dAsymmetricPaddingTest,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
// Depthwise Convolution
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NCHW)
// NHWC Depthwise Convolution
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Nhwc,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetric,
DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetricNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
DataLayout::NHWC);
// Splitter
@@ -230,9 +232,9 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test
ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
// Concat
-ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
-ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
-ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
// Normalization
@@ -366,50 +368,50 @@ ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilo
ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
// Constant
-ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
-ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
// Concat
-ARMNN_AUTO_TEST_CASE(Concat1d, Concat1dTest)
-ARMNN_AUTO_TEST_CASE(Concat1dUint8, Concat1dUint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat2dDim0, Concat2dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1, Concat2dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat3dDim0, Concat3dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1, Concat3dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2, Concat3dDim2Test, false)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2Uint8, Concat3dDim2Uint8Test, false)
-
-ARMNN_AUTO_TEST_CASE(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, false)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, false)
-
-ARMNN_AUTO_TEST_CASE(Concat4dDim0, Concat4dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim1, Concat4dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim3, Concat4dDim3Test, false)
-ARMNN_AUTO_TEST_CASE(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim3Uint8, Concat4dDim3Uint8Test, false)
-
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, false)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1d, Concat1dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1dUint8, Concat1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0, Concat2dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1, Concat2dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0, Concat3dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1, Concat3dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2, Concat3dDim2Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2Uint8, Concat3dDim2Uint8Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0, Concat4dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1, Concat4dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3, Concat4dDim3Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3Uint8, Concat4dDim3Uint8Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
// DepthToSpace
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
@@ -534,9 +536,9 @@ ARMNN_AUTO_TEST_CASE(QLstm, QLstmTest)
ARMNN_AUTO_TEST_CASE(QuantizedLstm, QuantizedLstmTest)
// Convert from Float16 to Float32
-ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
// Convert from Float32 to Float16
-ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
ARMNN_AUTO_TEST_CASE(AdditionAfterMaxPool, AdditionAfterMaxPoolTest)
@@ -579,82 +581,82 @@ ARMNN_AUTO_TEST_CASE(MinimumBroadcast1Element2, MinimumBroadcast1ElementTest2)
ARMNN_AUTO_TEST_CASE(MinimumBroadcast1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
// Equal
-ARMNN_AUTO_TEST_CASE(EqualSimple, EqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimple, EqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1Element, EqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(EqualSimpleFloat16, EqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVectorFloat16, EqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleFloat16, EqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorFloat16, EqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(EqualSimpleUint8, EqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleUint8, EqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
// Greater
-ARMNN_AUTO_TEST_CASE(GreaterSimple, GreaterSimpleTest)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimple, GreaterSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(GreaterSimpleFloat16, GreaterSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorFloat16, GreaterBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleFloat16, GreaterSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorFloat16, GreaterBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterSimpleUint8, GreaterSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleUint8, GreaterSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
// GreaterOrEqual
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimple, GreaterOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimple, GreaterOrEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimpleFloat16, GreaterOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1ElementFloat16, GreaterOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVectorFloat16, GreaterOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleFloat16, GreaterOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementFloat16, GreaterOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorFloat16, GreaterOrEqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimpleUint8, GreaterOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleUint8, GreaterOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
// Less
-ARMNN_AUTO_TEST_CASE(LessSimple, LessSimpleTest)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1Element, LessBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVector, LessBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimple, LessSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1Element, LessBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVector, LessBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(LessSimpleFloat16, LessSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1ElementFloat16, LessBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVectorFloat16, LessBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleFloat16, LessSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementFloat16, LessBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorFloat16, LessBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessSimpleUint8, LessSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleUint8, LessSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
// LessOrEqual
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimple, LessOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimple, LessOrEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimpleFloat16, LessOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1ElementFloat16, LessOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVectorFloat16, LessOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleFloat16, LessOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementFloat16, LessOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorFloat16, LessOrEqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimpleUint8, LessOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleUint8, LessOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
// NotEqual
-ARMNN_AUTO_TEST_CASE(NotEqualSimple, NotEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimple, NotEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(NotEqualSimpleFloat16, NotEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1ElementFloat16, NotEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorFloat16, NotEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleFloat16, NotEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementFloat16, NotEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorFloat16, NotEqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(NotEqualSimpleUint8, NotEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleUint8, NotEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
// Softmax
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
@@ -1231,19 +1233,19 @@ ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareSoftmaxBeta1WithReference, Comp
ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f)
ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareSoftmaxUint8, CompareSoftmaxUint8Test, 1.0f)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareConv2dWithReference, CompareConvolution2dTest)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceFloat32,
CompareDepthwiseConvolution2dFloatTest,
DataLayout::NCHW)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceUint8,
CompareDepthwiseConvolution2dUint8Test,
DataLayout::NCHW)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
CompareDepthwiseConvolution2dFloatTest,
DataLayout::NHWC)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
CompareDepthwiseConvolution2dUint8Test,
DataLayout::NHWC)
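
The backend test changes above are mechanical renames: the explicit arguments after the test-function name stay the same, because the *_WITH_THF macros evidently supply the workload factory, memory manager and tensor-handle factory themselves (none of them appear at the call sites). A hypothetical registration pair illustrating the converted form; the case names are invented, while the macros and test functions are the ones used above.

    ARMNN_AUTO_TEST_CASE_WITH_THF(SketchSimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
    ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(SketchCompareConv2dWithReference, CompareConvolution2dTest)
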
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 7855a28689..4c0d6a6b92 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -53,116 +53,118 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
// Convolution
-ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true)
-
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dNhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dStride2x2Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution1d, Convolution1dTest, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dNhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dStride2x2Nhwc,
SimpleConvolution2d3x3Stride2x2Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
+ Convolution2dAsymmetricPaddingTest,
+ DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
Convolution2dAsymmetricPaddingTest,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Int8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcInt8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Int8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcInt8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult4,
DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>,
false,
armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult2,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2,
DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>,
false,
armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
// DepthToSpace
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
@@ -216,44 +218,44 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// Depthwise Convolution
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NCHW)
// NHWC Depthwise Convolution
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1NHhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1NHhwc,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
-ARMNN_AUTO_TEST_CASE(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetric,
DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetricNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
DataLayout::NHWC);
namespace
@@ -545,16 +547,16 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test
ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
// Concat
-ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
-ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
-ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
// Convert from BFloat16 to Float32
-ARMNN_AUTO_TEST_CASE(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
// Convert from Float32 to BFloat16
-ARMNN_AUTO_TEST_CASE(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
// Fully Connected
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
@@ -606,50 +608,50 @@ ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLay
ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
// Constant
-ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
-ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
// Concat
-ARMNN_AUTO_TEST_CASE(Concat1d, Concat1dTest)
-ARMNN_AUTO_TEST_CASE(Concat1dUint8, Concat1dUint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat2dDim0, Concat2dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1, Concat2dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat3dDim0, Concat3dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1, Concat3dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2, Concat3dDim2Test, false)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2Uint8, Concat3dDim2Uint8Test, false)
-
-ARMNN_AUTO_TEST_CASE(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, false)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, false)
-
-ARMNN_AUTO_TEST_CASE(Concat4dDim0, Concat4dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim1, Concat4dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim3, Concat4dDim3Test, false)
-ARMNN_AUTO_TEST_CASE(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim3Uint8, Concat4dDim3Uint8Test, false)
-
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, false)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1d, Concat1dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1dUint8, Concat1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0, Concat2dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1, Concat2dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0, Concat3dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1, Concat3dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2, Concat3dDim2Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2Uint8, Concat3dDim2Uint8Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0, Concat4dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1, Concat4dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3, Concat4dDim3Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3Uint8, Concat4dDim3Uint8Test, false)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
// L2 Normalization
ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
@@ -677,58 +679,58 @@ ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Tes
ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
// Equal
-ARMNN_AUTO_TEST_CASE(EqualSimple, EqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimple, EqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1Element, EqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(EqualSimpleUint8, EqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleUint8, EqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
// Greater
-ARMNN_AUTO_TEST_CASE(GreaterSimple, GreaterSimpleTest)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimple, GreaterSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(GreaterSimpleUint8, GreaterSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleUint8, GreaterSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
// GreaterOrEqual
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimple, GreaterOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimple, GreaterOrEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimpleUint8, GreaterOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleUint8, GreaterOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
// Less
-ARMNN_AUTO_TEST_CASE(LessSimple, LessSimpleTest)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1Element, LessBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVector, LessBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimple, LessSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1Element, LessBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVector, LessBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(LessSimpleUint8, LessSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleUint8, LessSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
// LessOrEqual
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimple, LessOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimple, LessOrEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimpleUint8, LessOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleUint8, LessOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
// NotEqual
-ARMNN_AUTO_TEST_CASE(NotEqualSimple, NotEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimple, NotEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(NotEqualSimpleUint8, NotEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleUint8, NotEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
@@ -1317,19 +1319,19 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillS32, SimpleFillTest<DataType::Signed32>)
// ============================================================================
// COMPARE tests
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareConv2dWithReference, CompareConvolution2dTest)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceFloat32,
CompareDepthwiseConvolution2dFloatTest,
DataLayout::NCHW)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceUint8,
CompareDepthwiseConvolution2dUint8Test,
DataLayout::NCHW)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
CompareDepthwiseConvolution2dFloatTest,
DataLayout::NHWC)
-ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
+ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
CompareDepthwiseConvolution2dUint8Test,
DataLayout::NHWC)
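
The only change these hunks make is the registration macro: ARMNN_AUTO_TEST_CASE becomes ARMNN_AUTO_TEST_CASE_WITH_THF, whose runner additionally hands an ITensorHandleFactory through to the test function. The real macros are defined elsewhere in the tree and are not part of this patch; what follows is only a minimal, generic, self-contained sketch of that pattern, and every type, helper and name in it is invented for illustration.

// Generic sketch (not Arm NN source) of what the macro rename changes: the
// *_WITH_THF registration forwards one extra factory object into the test
// function. All names below are hypothetical stand-ins.
#include <iostream>
#include <string>
#include <utility>

struct FakeWorkloadFactory {};     // stand-in for the backend workload factory
struct FakeTensorHandleFactory {}; // stand-in for armnn::ITensorHandleFactory

// Old-style runner: the test body only receives the workload factory.
template <typename TestFn, typename... Args>
void RunTest(const std::string& name, TestFn testFn, Args&&... args)
{
    FakeWorkloadFactory workloadFactory;
    testFn(workloadFactory, std::forward<Args>(args)...);
    std::cout << name << ": no tensor handle factory\n";
}

// New-style runner: a tensor handle factory is built and forwarded as well,
// so the test body can create its tensor handles through it.
template <typename TestFn, typename... Args>
void RunTestWithTHF(const std::string& name, TestFn testFn, Args&&... args)
{
    FakeWorkloadFactory workloadFactory;
    FakeTensorHandleFactory tensorHandleFactory;
    testFn(workloadFactory, tensorHandleFactory, std::forward<Args>(args)...);
    std::cout << name << ": with tensor handle factory\n";
}

// Registration macros in the spirit of the two forms used above (registrations
// with no extra arguments would additionally need __VA_OPT__ or ##__VA_ARGS__).
#define SKETCH_TEST_CASE(TestName, TestFunction, ...)          RunTest(#TestName, TestFunction, __VA_ARGS__);
#define SKETCH_TEST_CASE_WITH_THF(TestName, TestFunction, ...) RunTestWithTHF(#TestName, TestFunction, __VA_ARGS__);

// Example test bodies for each signature; a real test would create its input
// and output handles from the tensor handle factory argument.
void OldStyleTest(const FakeWorkloadFactory&, bool biasEnabled) { (void)biasEnabled; }
void NewStyleTest(const FakeWorkloadFactory&, const FakeTensorHandleFactory&, bool biasEnabled) { (void)biasEnabled; }

int main()
{
    SKETCH_TEST_CASE(OldStyleConvolutionSketch, OldStyleTest, true)
    SKETCH_TEST_CASE_WITH_THF(NewStyleConvolutionSketch, NewStyleTest, true)
    return 0;
}

Compiling and running the sketch prints one line per registration, which is all the plumbing difference the renamed macros introduce; the per-backend factory wiring in Arm NN itself is handled by the test framework, not by the individual registrations shown in this diff.
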
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index f03f320df5..4feba22fcd 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -23,341 +23,349 @@ using FactoryType = RefWorkloadFactory;
// UNIT tests
// Convolution
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5Uint8, SimpleConvolution2d3x5Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5Nhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5Uint8Nhwc, SimpleConvolution2d3x5Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5QSymm16, SimpleConvolution2d3x5QSymm16Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5QSymm16Nhwc, SimpleConvolution2d3x5QSymm16Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x5, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x5Uint8, SimpleConvolution2d3x5Uint8Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x5Nhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x5Uint8Nhwc, SimpleConvolution2d3x5Uint8Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x5QSymm16, SimpleConvolution2d3x5QSymm16Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x5QSymm16Nhwc,
+ SimpleConvolution2d3x5QSymm16Test,
+ true,
+ DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolutionUint8, SimpleConvolution2d3x5Uint8Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolutionUint8Nhwc, SimpleConvolution2d3x5Uint8Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolutionUint8, SimpleConvolution2d3x5Uint8Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolutionUint8Nhwc, SimpleConvolution2d3x5Uint8Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution1dUint8, Convolution1dUint8Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution1d, Convolution1dTest, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution1dUint8, Convolution1dUint8Test, true)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3, SimpleConvolution2d3x3Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3QSymm16, SimpleConvolution2d3x3QSymm16Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3, SimpleConvolution2d3x3Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3QSymm16, SimpleConvolution2d3x3QSymm16Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Nhwc, SimpleConvolution2d3x3Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3QSymm16Nhwc, SimpleConvolution2d3x3QSymm16Test, true,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Nhwc, SimpleConvolution2d3x3Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3QSymm16Nhwc, SimpleConvolution2d3x3QSymm16Test, true,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquareStride2x2Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquareStride2x2Nhwc,
SimpleConvolution2d3x3Stride2x2Test,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSize,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSize,
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
+ Convolution2dAsymmetricPaddingTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSizeNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSizeNhwc,
Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
Convolution2dAsymmetricPaddingTest,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3BFloat16,
Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcBFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcBFloat16,
Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Int8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcInt8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Int16,
Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcInt16,
Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3BFloat16,
Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcBFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcBFloat16,
Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Int8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcInt8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Int16,
Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcInt16,
Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3BFloat16,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcBFloat16,
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcBFloat16,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int16,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt16,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Stride2x2Bf16, Convolution2d3x3Stride2x2BFloat16Test, false, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(Convolution2d3x3Stride2x2BFloat16SmallValue,
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2Bf16,
+ Convolution2d3x3Stride2x2BFloat16Test,
+ false,
+ DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue,
Convolution2d3x3Stride2x2BFloat16SmallValueTest,
false,
DataLayout::NHWC);
// Depthwise Convolution
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2d, DepthwiseConvolution2dTest, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2d, DepthwiseConvolution2dTest, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dUint8,
DepthwiseConvolution2dUint8Test,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dQSymm16, DepthwiseConvolution2dInt16Test, true, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dQSymm16, DepthwiseConvolution2dInt16Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dNhwc, DepthwiseConvolution2dTest, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dUint8Nhwc, DepthwiseConvolution2dUint8Test, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dNhwc, DepthwiseConvolution2dTest, true, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dUint8Nhwc, DepthwiseConvolution2dUint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dNhwc, DepthwiseConvolution2dTest, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dNhwc, DepthwiseConvolution2dTest, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dUint8Nhwc,
DepthwiseConvolution2dUint8Test,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
-ARMNN_AUTO_TEST_CASE(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Nhwc,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3BFloat16,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Int8,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcInt8,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Uint8,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Int16,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3BFloat16,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Int8,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt8,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult4,
DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>,
false,
armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult2,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2,
DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>,
false,
armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult4BFloat16,
DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>,
false,
armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult2BFloat16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2BFloat16,
DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>,
false,
armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Int16,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Int16,
DepthwiseConvolution2dDepthMul1Int16Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Nhwc,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetric,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetric,
DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetric,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetricNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
+ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
+ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
DataLayout::NHWC);
// Pooling
@@ -588,15 +596,15 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterInt16, CopyViaSplitterInt16Test)
// Concat
-ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
-ARMNN_AUTO_TEST_CASE(ConcatBFloat16, ConcatBFloat16Test)
-ARMNN_AUTO_TEST_CASE(ConcatFloat16, ConcatFloat16Test)
-ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
-ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
-ARMNN_AUTO_TEST_CASE(ConcatUint16, ConcatUint16Test)
-ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatBFloat16, ConcatBFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatFloat16, ConcatFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint16, ConcatUint16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, true)
-ARMNN_AUTO_TEST_CASE(ConcatInt16DifferentInputOutputQParam,
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatInt16DifferentInputOutputQParam,
ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>, true)
// Add
@@ -663,82 +671,82 @@ ARMNN_AUTO_TEST_CASE(DivisionInt32Broadcast1Element, DivisionBroadcast1ElementIn
ARMNN_AUTO_TEST_CASE(DivisionInt32Broadcast1DVector, DivisionBroadcast1DVectorInt32Test)
// Equal
-ARMNN_AUTO_TEST_CASE(EqualSimple, EqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimple, EqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1Element, EqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(EqualSimpleFloat16, EqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVectorFloat16, EqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleFloat16, EqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorFloat16, EqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(EqualSimpleUint8, EqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleUint8, EqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
// Greater
-ARMNN_AUTO_TEST_CASE(GreaterSimple, GreaterSimpleTest)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimple, GreaterSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(GreaterSimpleFloat16, GreaterSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorFloat16, GreaterBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleFloat16, GreaterSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorFloat16, GreaterBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterSimpleUint8, GreaterSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleUint8, GreaterSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
// GreaterOrEqual
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimple, GreaterOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimple, GreaterOrEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimpleFloat16, GreaterOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1ElementFloat16, GreaterOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVectorFloat16, GreaterOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleFloat16, GreaterOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementFloat16, GreaterOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorFloat16, GreaterOrEqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualSimpleUint8, GreaterOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleUint8, GreaterOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
// Less
-ARMNN_AUTO_TEST_CASE(LessSimple, LessSimpleTest)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1Element, LessBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVector, LessBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimple, LessSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1Element, LessBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVector, LessBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(LessSimpleFloat16, LessSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1ElementFloat16, LessBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVectorFloat16, LessBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleFloat16, LessSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementFloat16, LessBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorFloat16, LessBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessSimpleUint8, LessSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleUint8, LessSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
// LessOrEqual
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimple, LessOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimple, LessOrEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimpleFloat16, LessOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1ElementFloat16, LessOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVectorFloat16, LessOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleFloat16, LessOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementFloat16, LessOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorFloat16, LessOrEqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualSimpleUint8, LessOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleUint8, LessOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
// NotEqual
-ARMNN_AUTO_TEST_CASE(NotEqualSimple, NotEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimple, NotEqualSimpleTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
-ARMNN_AUTO_TEST_CASE(NotEqualSimpleFloat16, NotEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1ElementFloat16, NotEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorFloat16, NotEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleFloat16, NotEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementFloat16, NotEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorFloat16, NotEqualBroadcast1dVectorFloat16Test)
-ARMNN_AUTO_TEST_CASE(NotEqualSimpleUint8, NotEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleUint8, NotEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
@@ -1308,55 +1316,55 @@ ARMNN_AUTO_TEST_CASE(PadInt83d, PadInt83dTest)
ARMNN_AUTO_TEST_CASE(PadInt84d, PadInt84dTest)
// Constant
-ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
-ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8CustomQuantizationScaleAndOffsetTest)
-ARMNN_AUTO_TEST_CASE(ConstantInt16, ConstantInt16CustomQuantizationScaleAndOffsetTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8CustomQuantizationScaleAndOffsetTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantInt16, ConstantInt16CustomQuantizationScaleAndOffsetTest)
// Concat
-ARMNN_AUTO_TEST_CASE(Concat1d, Concat1dTest)
-ARMNN_AUTO_TEST_CASE(Concat1dUint8, Concat1dUint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat2dDim0, Concat2dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1, Concat2dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
-
-ARMNN_AUTO_TEST_CASE(Concat3dDim0, Concat3dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1, Concat3dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2, Concat3dDim2Test, true)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2Uint8, Concat3dDim2Uint8Test, true)
-
-ARMNN_AUTO_TEST_CASE(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, true)
-ARMNN_AUTO_TEST_CASE(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, true)
-
-ARMNN_AUTO_TEST_CASE(Concat4dDim0, Concat4dDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim1, Concat4dDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim2, Concat4dDim2Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim3, Concat4dDim3Test, true)
-ARMNN_AUTO_TEST_CASE(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim2Uint8, Concat4dDim2Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDim3Uint8, Concat4dDim3Uint8Test, true)
-
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim2, Concat4dDiffShapeDim2Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, true)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim2Uint8, Concat4dDiffShapeDim2Uint8Test)
-ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1d, Concat1dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1dUint8, Concat1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0, Concat2dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1, Concat2dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0, Concat3dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1, Concat3dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2, Concat3dDim2Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2Uint8, Concat3dDim2Uint8Test, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0, Concat4dDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1, Concat4dDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim2, Concat4dDim2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3, Concat4dDim3Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim2Uint8, Concat4dDim2Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3Uint8, Concat4dDim3Uint8Test, true)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim2, Concat4dDiffShapeDim2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, true)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim2Uint8, Concat4dDiffShapeDim2Uint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, true)
// Fill
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFill, SimpleFillTest<DataType::Float32>)
@@ -1447,15 +1455,15 @@ ARMNN_AUTO_TEST_CASE(QLstm1, QLstmTest1)
ARMNN_AUTO_TEST_CASE(QLstm2, QLstmTest2)
// Convert from BFloat16 to Float32
-ARMNN_AUTO_TEST_CASE(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
// Convert from Float32 to BFloat16
-ARMNN_AUTO_TEST_CASE(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test)
// Convert from Float16 to Float32
-ARMNN_AUTO_TEST_CASE(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
// Convert from Float32 to Float16
-ARMNN_AUTO_TEST_CASE(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
// Mean
ARMNN_AUTO_TEST_CASE(MeanSimpleFloat32, MeanSimpleTest<DataType::Float32>)