diff options
author | Matthew Jackson <matthew.jackson@arm.com> | 2019-09-12 09:08:23 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-09-12 15:28:01 +0000 |
commit | 9bff14458f9950a5d31b9523c62c0bbf79a65fcf (patch) | |
tree | 8252812da63458e38b704a3abba3e1d5a35e1bf2 /src/backends/backendsCommon/test | |
parent | 1e0466c4ab26e82abed7f8f263dfe6a2a543cc1a (diff) | |
download | armnn-9bff14458f9950a5d31b9523c62c0bbf79a65fcf.tar.gz |
IVGCVSW-3857 Add Reference FP16 workload support to remaining layers
* Adds Reference FP16 support and unit tests for layers that did not already support it
!referencetests:202156
Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: I6fc9b9ce2809e163f72e27e877025c8fb85d9fbe
Diffstat (limited to 'src/backends/backendsCommon/test')
30 files changed, 1291 insertions, 131 deletions
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp index a0c6553e24..b7ca3b34c0 100644 --- a/src/backends/backendsCommon/test/QuantizeHelper.hpp +++ b/src/backends/backendsCommon/test/QuantizeHelper.hpp @@ -8,6 +8,8 @@ #include <armnn/ArmNN.hpp> #include <armnn/TypesUtils.hpp> +#include <Half.hpp> + #include <initializer_list> #include <iterator> #include <vector> @@ -45,6 +47,22 @@ struct SelectiveQuantizer<T, false> } }; +template<> +struct SelectiveQuantizer<armnn::Half, false> +{ + static armnn::Half Quantize(float value, float scale, int32_t offset) + { + boost::ignore_unused(scale, offset); + return armnn::Half(value); + } + + static float Dequantize(armnn::Half value, float scale, int32_t offset) + { + boost::ignore_unused(scale, offset); + return value; + } +}; + template<typename T> T SelectiveQuantize(float value, float scale, int32_t offset) { diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp index d8f87e15de..ef430883d4 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp @@ -198,7 +198,7 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl( } // anonymous namespace -LayerTestResult<float, 4> BatchNormFloatTest( +LayerTestResult<float, 4> BatchNormFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -244,7 +244,7 @@ LayerTestResult<float, 4> BatchNormFloatTest( armnn::DataLayout::NCHW); } -LayerTestResult<float, 4> BatchNormFloatNhwcTest( +LayerTestResult<float, 4> BatchNormFloat32NhwcTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -294,6 +294,102 @@ LayerTestResult<float, 4> BatchNormFloatNhwcTest( 
armnn::DataLayout::NHWC); } +LayerTestResult<armnn::Half, 4> BatchNormFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + // BatchSize: 1 + // Channels: 2 + // Height: 3 + // Width: 2 + + const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 }; + std::vector<float> inputValues + { + // Batch 0, Channel 0, Height (3) x Width (2) + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + // Batch 0, Channel 1, Height (3) x Width (2) + 1.f, 1.f, + 4.f, 1.f, + -2.f, 4.f + }; + std::vector<float> expectedOutputValues + { + // Batch 0, Channel 0, Height (3) x Width (2) + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + // Batch 0, Channel 1, Height (3) x Width (2) + 3.f, 3.f, + 4.f, 3.f, + 2.f, 4.f + }; + + return BatchNormTestImpl<armnn::DataType::Float16>( + workloadFactory, + memoryManager, + inputOutputShape, + inputValues, + expectedOutputValues, + 0.f, + 0, + armnn::DataLayout::NCHW); +} + +LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + // BatchSize: 1 + // Height: 3 + // Width: 2 + // Channels: 2 + + const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 }; + std::vector<float> inputValues + { + // Batch 0, Height 0, Width (2) x Channel (2) + 1.f, 1.f, + 4.f, 1.f, + + // Batch 0, Height 1, Width (2) x Channel (2) + 4.f, 4.f, + 2.f, 1.f, + + // Batch 0, Height 2, Width (2) x Channel (2) + 1.f, -2.f, + 6.f, 4.f + }; + std::vector<float> expectedOutputValues + { + // Batch 0, Height 0, Width (2) x Channel (2) + 1.f, 3.f, + 4.f, 3.f, + + // Batch 0, Height 1, Width (2) x Channel (2) + 4.f, 4.f, + 2.f, 3.f, + + // Batch 0, Height 2, Width (2) x Channel (2) + 1.f, 2.f, + 6.f, 4.f + }; + + return BatchNormTestImpl<armnn::DataType::Float16>( + workloadFactory, + memoryManager, + inputOutputShape, + inputValues, + expectedOutputValues, + 0.f, + 0, + armnn::DataLayout::NHWC); +} + 
LayerTestResult<uint8_t, 4> BatchNormUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp index 200e5d8e04..a2dacde1a9 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp @@ -7,14 +7,24 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> -LayerTestResult<float, 4> BatchNormFloatTest( +LayerTestResult<float, 4> BatchNormFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<float, 4> BatchNormFloat32NhwcTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> BatchNormFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult<float, 4> BatchNormFloatNhwcTest( +LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp index 3cfbca8441..29476e522a 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp @@ -2227,6 +2227,13 @@ LayerTestResult<float, 4> Concat4dDiffShapeDim3Test( workloadFactory, memoryManager, 0.0f, 0, useSubtensor); } +LayerTestResult<armnn::Half, 3> ConcatFloat16Test( + armnn::IWorkloadFactory& 
workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return Concat3dDim1TestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0); +} + LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp index 421d03ad18..223bf190df 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp @@ -8,6 +8,7 @@ #include "LayerTestResult.hpp" #include <ResolveType.hpp> +#include <Half.hpp> #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -22,6 +23,10 @@ LayerTestResult<float, 3> ConcatTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 3> ConcatFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 3> ConcatUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp index 0316ea185b..9a110a3d34 100644 --- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp @@ -154,6 +154,110 @@ LayerTestResult<float, 4> DivisionBroadcast1DVectorTest( output); } +LayerTestResult<armnn::Half, 4> DivisionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace 
half_float::literal; + + const unsigned int width = 2u; + const unsigned int height = 2u; + const unsigned int channelCount = 2u; + const unsigned int batchSize = 2u; + + unsigned int shape[] = { batchSize, channelCount, height, width }; + + std::vector<armnn::Half> input0 = + { + 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + std::vector<armnn::Half> input1 = + { + 1._h, 1._h, 1._h, 1._h, 2._h, 2._h, 2._h, 2._h, + 4._h, 4._h, 4._h, 4._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector<armnn::Half> output = + { + 2._h, 2._h, 2._h, 2._h, 1.50_h, 1.50_h, 1.50_h, 1.50_h, + 1._h, 1._h, 1._h, 1._h, 1.25_h, 1.25_h, 1.25_h, 1.25_h + }; + + return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult<armnn::Half, 4> DivisionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 2, 2, 2 }; + unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector<armnn::Half> input0({ 2._h, 4._h, 6._h, 8._h, 10._h, 12._h, 14._h, 16._h}); + + std::vector<armnn::Half> input1({ 2._h }); + + std::vector<armnn::Half> output({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h}); + + return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<armnn::Half, 4> DivisionBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 3, 3, 2 }; + unsigned int shape1[] = { 1, 1, 1, 2 }; + + std::vector<armnn::Half> input0 = + { + 1._h, 4._h, 
3._h, 8._h, 5._h, 12._h, + 7._h, 16._h, 9._h, 20._h, 11._h, 24._h, + 13._h, 28._h, 15._h, 32._h, 17._h, 36._h + }; + + std::vector<armnn::Half> input1 = { 1._h, 2._h }; + + std::vector<armnn::Half> output = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h, + 13._h, 14._h, 15._h, 16._h, 17._h, 18._h + }; + + return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult<uint8_t, 4> DivisionUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp index e06b494b7d..0446f8b916 100644 --- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -26,6 +28,18 @@ LayerTestResult<float, 4> DivisionBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 4> DivisionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> DivisionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> DivisionBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> DivisionUint8Test( armnn::IWorkloadFactory& 
workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp index fa72136255..b0b613c137 100644 --- a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp @@ -4,9 +4,10 @@ // #include "EqualTestImpl.hpp" - #include "ElementwiseTestImpl.hpp" +#include <Half.hpp> + template<> std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>( const armnn::IWorkloadFactory& workloadFactory, @@ -98,6 +99,100 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest( output); } +LayerTestResult<uint8_t, 4> EqualFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape[] = { 2, 2, 2, 2 }; + + // See dequantized values to the right. 
+ std::vector<armnn::Half> input0({ 1._h, 1._h, 1._h, 1._h, 6._h, 6._h, 6._h, 6._h, + 3._h, 3._h, 3._h, 3._h, 7._h, 7._h, 7._h, 7._h }); + + std::vector<armnn::Half> input1({ 2._h, 2._h, 2._h, 2._h, 6._h, 6._h, 6._h, 6._h, + 3._h, 3._h, 3._h, 3._h, 5._h, 5._h, 5._h, 5._h }); + + std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0 }); + + return ElementwiseTestHelper<4, + armnn::EqualQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult<uint8_t, 4> EqualBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector<armnn::Half> input0({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h }); + + std::vector<armnn::Half> input1({ 1._h }); + + std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 }); + + return ElementwiseTestHelper<4, + armnn::EqualQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector<armnn::Half> input0({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h }); + + std::vector<armnn::Half> input1({ 1._h, 1._h, 3._h }); + + std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0 }); + + return ElementwiseTestHelper<4, + 
armnn::EqualQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult<uint8_t, 4> EqualUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp index e9560b38bd..3ff07ba58f 100644 --- a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp @@ -21,6 +21,18 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<uint8_t, 4> EqualFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<uint8_t, 4> EqualBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> EqualUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp index f97d51a22d..40ed8a20a8 100644 --- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp @@ -64,6 +64,12 @@ SimpleFloorTest<armnn::DataType::Float32>( armnn::IWorkloadFactory& workloadFactory, const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4> +SimpleFloorTest<armnn::DataType::Float16>( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + + template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4> SimpleFloorTest<armnn::DataType::QuantisedSymm16>( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp index 0118f54257..5e38e48191 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp @@ -64,181 +64,317 @@ LayerTestResult<T, OutputDim> GatherTestImpl( return result; } -template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> -LayerTestResult<T, 1> Gather1dParamsTestImpl(armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +struct GatherTestHelper { - armnn::TensorInfo paramsInfo({ 8 }, ArmnnType); - armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32); - armnn::TensorInfo outputInfo({ 4 }, ArmnnType); - - if (armnn::IsQuantizedType<T>()) + static LayerTestResult<T, 1> Gather1dParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - paramsInfo.SetQuantizationScale(1.0f); - paramsInfo.SetQuantizationOffset(1); - outputInfo.SetQuantizationScale(1.0f); - outputInfo.SetQuantizationOffset(1); + armnn::TensorInfo paramsInfo({ 8 }, ArmnnType); + armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 4 }, ArmnnType); + + if (armnn::IsQuantizedType<T>()) + { + 
paramsInfo.SetQuantizationScale(1.0f); + paramsInfo.SetQuantizationOffset(1); + outputInfo.SetQuantizationScale(1.0f); + outputInfo.SetQuantizationOffset(1); + } + const std::vector<T> params = std::vector<T>({ 1, 2, 3, 4, 5, 6, 7, 8 }); + const std::vector<int32_t> indices = std::vector<int32_t>({ 0, 2, 1, 5 }); + const std::vector<T> expectedOutput = std::vector<T>({ 1, 3, 2, 6 }); + + return GatherTestImpl<ArmnnType, T, 1, 1, 1>( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); } - const std::vector<T> params = std::vector<T>({ 1, 2, 3, 4, 5, 6, 7, 8 }); - const std::vector<int32_t> indices = std::vector<int32_t>({ 0, 2, 1, 5 }); - const std::vector<T> expectedOutput = std::vector<T>({ 1, 3, 2, 6 }); - - return GatherTestImpl<ArmnnType, T, 1, 1, 1>( - workloadFactory, - memoryManager, - paramsInfo, - indicesInfo, - outputInfo, - params, - indices, - expectedOutput); -} - -template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> -LayerTestResult<T, 2> GatherMultiDimParamsTestImpl( - armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) -{ - armnn::TensorInfo paramsInfo({ 5, 2 }, ArmnnType); - armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); - armnn::TensorInfo outputInfo({ 3, 2 }, ArmnnType); - if (armnn::IsQuantizedType<T>()) + static LayerTestResult<T, 2> GatherMultiDimParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - paramsInfo.SetQuantizationScale(1.0f); - paramsInfo.SetQuantizationOffset(1); - outputInfo.SetQuantizationScale(1.0f); - outputInfo.SetQuantizationOffset(1); + armnn::TensorInfo paramsInfo({ 5, 2 }, ArmnnType); + armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 3, 2 }, ArmnnType); + + if (armnn::IsQuantizedType<T>()) + { + 
paramsInfo.SetQuantizationScale(1.0f); + paramsInfo.SetQuantizationOffset(1); + outputInfo.SetQuantizationScale(1.0f); + outputInfo.SetQuantizationOffset(1); + } + + const std::vector<T> params = std::vector<T>({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }); + const std::vector<int32_t> indices = std::vector<int32_t>({ 1, 3, 4 }); + const std::vector<T> expectedOutput = std::vector<T>({ 3, 4, 7, 8, 9, 10 }); + + return GatherTestImpl<ArmnnType, T, 2, 1, 2>( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); } - const std::vector<T> params = std::vector<T>({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }); - const std::vector<int32_t> indices = std::vector<int32_t>({ 1, 3, 4 }); - const std::vector<T> expectedOutput = std::vector<T>({ 3, 4, 7, 8, 9, 10 }); - - return GatherTestImpl<ArmnnType, T, 2, 1, 2>( - workloadFactory, - memoryManager, - paramsInfo, - indicesInfo, - outputInfo, - params, - indices, - expectedOutput); -} + static LayerTestResult<T, 4> GatherMultiDimParamsMultiDimIndicesTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + { + armnn::TensorInfo paramsInfo({ 3, 2, 3}, ArmnnType); + armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 2, 3, 2, 3 }, ArmnnType); + + if (armnn::IsQuantizedType<T>()) + { + paramsInfo.SetQuantizationScale(1.0f); + paramsInfo.SetQuantizationOffset(1); + outputInfo.SetQuantizationScale(1.0f); + outputInfo.SetQuantizationOffset(1); + } + + const std::vector<T> params = + { + 1, 2, 3, + 4, 5, 6, + + 7, 8, 9, + 10, 11, 12, + + 13, 14, 15, + 16, 17, 18 + }; + + const std::vector<int32_t> indices = { 1, 2, 1, 2, 1, 0 }; + + const std::vector<T> expectedOutput = + { + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 7, 8, 9, + 10, 11, 12, + + 13, 14, 15, + 16, 17, 18, + 7, 8, 9, + 10, 11, 12, + 1, 2, 3, + 4, 5, 6 + }; + + return GatherTestImpl<ArmnnType, T, 3, 2, 
4>( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); + } +}; -template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> -LayerTestResult<T, 4> GatherMultiDimParamsMultiDimIndicesTestImpl( - armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +template<typename T> +struct GatherTestHelper<armnn::DataType::Float16, T> { - armnn::TensorInfo paramsInfo({ 3, 2, 3}, ArmnnType); - armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32); - armnn::TensorInfo outputInfo({ 2, 3, 2, 3 }, ArmnnType); - - if (armnn::IsQuantizedType<T>()) + static LayerTestResult<T, 1> Gather1dParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - paramsInfo.SetQuantizationScale(1.0f); - paramsInfo.SetQuantizationOffset(1); - outputInfo.SetQuantizationScale(1.0f); - outputInfo.SetQuantizationOffset(1); + using namespace half_float::literal; + + armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::Float16); + armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 4 }, armnn::DataType::Float16); + + const std::vector<T> params = std::vector<T>({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }); + const std::vector<int32_t> indices = std::vector<int32_t>({ 0, 2, 1, 5 }); + const std::vector<T> expectedOutput = std::vector<T>({ 1._h, 3._h, 2._h, 6._h }); + + return GatherTestImpl<armnn::DataType::Float16, T, 1, 1, 1>( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); } - const std::vector<T> params = + static LayerTestResult<T, 2> GatherMultiDimParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - 1, 2, 3, - 4, 5, 6, - - 7, 8, 9, - 10, 11, 12, - - 13, 14, 15, - 16, 
17, 18 - }; - - const std::vector<int32_t> indices = { 1, 2, 1, 2, 1, 0 }; + using namespace half_float::literal; + + armnn::TensorInfo paramsInfo({ 5, 2 }, armnn::DataType::Float16); + armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 3, 2 }, armnn::DataType::Float16); + + const std::vector<T> params = std::vector<T>({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h, 9._h, 10._h }); + + const std::vector<int32_t> indices = std::vector<int32_t>({ 1, 3, 4 }); + const std::vector<T> expectedOutput = std::vector<T>({ 3._h, 4._h, 7._h, 8._h, 9._h, 10._h }); + + return GatherTestImpl<armnn::DataType::Float16, T, 2, 1, 2>( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); + } - const std::vector<T> expectedOutput = + static LayerTestResult<T, 4> GatherMultiDimParamsMultiDimIndicesTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - 7, 8, 9, - 10, 11, 12, - 13, 14, 15, - 16, 17, 18, - 7, 8, 9, - 10, 11, 12, - - 13, 14, 15, - 16, 17, 18, - 7, 8, 9, - 10, 11, 12, - 1, 2, 3, - 4, 5, 6 - }; - - return GatherTestImpl<ArmnnType, T, 3, 2, 4>( - workloadFactory, - memoryManager, - paramsInfo, - indicesInfo, - outputInfo, - params, - indices, - expectedOutput); -} + using namespace half_float::literal; + + armnn::TensorInfo paramsInfo({ 3, 2, 3 }, armnn::DataType::Float16); + armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 2, 3, 2, 3 }, armnn::DataType::Float16); + + const std::vector<T> params = + { + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h, + + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + + 13._h, 14._h, 15._h, + 16._h, 17._h, 18._h + }; + + const std::vector<int32_t> indices = { 1, 2, 1, 2, 1, 0 }; + + const std::vector<T> expectedOutput = + { + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + 13._h, 14._h, 15._h, + 16._h, 17._h, 18._h, + 7._h, 
8._h, 9._h, + 10._h, 11._h, 12._h, + + 13._h, 14._h, 15._h, + 16._h, 17._h, 18._h, + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h + }; + + return GatherTestImpl<armnn::DataType::Float16, T, 3, 2, 4>( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); + } +}; } // anonymous namespace -LayerTestResult<float, 1> Gather1dParamsFloatTest( +LayerTestResult<float, 1> Gather1dParamsFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Gather1dParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::Float32>::Gather1dParamsTestImpl(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 1> Gather1dParamsFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return GatherTestHelper<armnn::DataType::Float16>::Gather1dParamsTestImpl(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 1> Gather1dParamsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Gather1dParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::Gather1dParamsTestImpl(workloadFactory, memoryManager); } LayerTestResult<int16_t, 1> Gather1dParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Gather1dParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::QuantisedSymm16>::Gather1dParamsTestImpl(workloadFactory, memoryManager); +} + +LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return GatherTestHelper<armnn::DataType::Float32>::GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); } -LayerTestResult<float, 2> GatherMultiDimParamsFloatTest( +LayerTestResult<armnn::Half, 2> GatherMultiDimParamsFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::Float16>::GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); } LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsTestImpl( + workloadFactory, memoryManager); } LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsTestImpl( + workloadFactory, memoryManager); } -LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest( +LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager); + return GatherTestHelper<armnn::DataType::Float32>::GatherMultiDimParamsMultiDimIndicesTestImpl( + workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> 
GatherMultiDimParamsMultiDimIndicesFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return GatherTestHelper<armnn::DataType::Float16>::GatherMultiDimParamsMultiDimIndicesTestImpl( + workloadFactory, memoryManager); } LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>( + return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsMultiDimIndicesTestImpl( workloadFactory, memoryManager); } @@ -246,6 +382,6 @@ LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>( + return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsMultiDimIndicesTestImpl( workloadFactory, memoryManager); } diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp index fd12e61e2d..33df17964b 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp @@ -7,10 +7,16 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> -LayerTestResult<float, 1> Gather1dParamsFloatTest( +LayerTestResult<float, 1> Gather1dParamsFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 1> Gather1dParamsFloat16Test( armnn::IWorkloadFactory& workloadFactory, const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -22,7 +28,11 @@ LayerTestResult<int16_t, 1> Gather1dParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult<float, 2> GatherMultiDimParamsFloatTest( +LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 2> GatherMultiDimParamsFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -34,7 +44,11 @@ LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest( +LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> GatherMultiDimParamsMultiDimIndicesFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp index b5bf560e3c..0148216285 100644 --- a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp @@ -4,9 +4,10 @@ // #include "GreaterTestImpl.hpp" - #include "ElementwiseTestImpl.hpp" +#include <Half.hpp> + template<> std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>( const armnn::IWorkloadFactory& workloadFactory, @@ -119,6 +120,116 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest( output); } 
+LayerTestResult<uint8_t, 4> GreaterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int width = 2u; + const unsigned int height = 2u; + const unsigned int channelCount = 2u; + const unsigned int batchSize = 2u; + + unsigned int shape[] = { batchSize, channelCount, height, width }; + + std::vector<armnn::Half> input0 = + { + 1._h, 1._h, 1._h, 1._h, 5._h, 5._h, 5._h, 5._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector<armnn::Half> input1 = + { + 1._h, 1._h, 1._h, 1._h, 3._h, 3._h, 3._h, 3._h, + 5._h, 5._h, 5._h, 5._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector<uint8_t> output = + { + 0, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0 + }; + + return ElementwiseTestHelper<4, + armnn::GreaterQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 2, 2, 2 }; + unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; + std::vector<armnn::Half> input1 = { 1._h }; + + std::vector<uint8_t> output = { 0, 1, 1, 1, 1, 1, 1, 1}; + + return ElementwiseTestHelper<4, + armnn::GreaterQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + 
+ const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector<armnn::Half> input0 = + { + 1.0_h, 2.9_h, 2.1_h, 4.0_h, 5.0_h, 6.0_h, + 7.0_h, 8.0_h, 9.0_h, 10.0_h, 11.0_h, 12.0_h + }; + + std::vector<armnn::Half> input1 = { 1._h, 3._h, 2._h }; + + std::vector<uint8_t> output = + { + 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1 + }; + + return ElementwiseTestHelper<4, + armnn::GreaterQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult<uint8_t, 4> GreaterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp index 39f3a39451..060fc28b9a 100644 --- a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp @@ -21,6 +21,18 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<uint8_t, 4> GreaterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> GreaterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git 
a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp index d0e624d655..07e2befd66 100644 --- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp @@ -111,6 +111,107 @@ LayerTestResult<float, 4> MaximumBroadcast1DVectorTest( output); } +LayerTestResult<armnn::Half, 4> MaximumFloat16Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int width = 2u; + const unsigned int height = 2u; + const unsigned int channelCount = 2u; + const unsigned int batchSize = 2u; + + unsigned int shape[] = { batchSize, channelCount, height, width }; + + std::vector<armnn::Half> input0 = + { + 1._h, 1._h, 1._h, 1._h, 5._h, 5._h, 5._h, 5._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector<armnn::Half> input1 = + { + 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + std::vector<armnn::Half> output = + { + 2._h, 2._h, 2._h, 2._h, 5._h, 5._h, 5._h, 5._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult<armnn::Half, 4> MaximumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 2, 2, 2 }; + unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; + + std::vector<armnn::Half> input1 = { 2._h }; + + std::vector<armnn::Half> output = { 2._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; + 
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<armnn::Half, 4> MaximumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector<armnn::Half> input0 = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + std::vector<armnn::Half> input1 = { 1._h, 2._h, 3._h }; + + std::vector<armnn::Half> output = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult<uint8_t, 4> MaximumUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp index b672431c15..0c7ab47925 100644 --- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -21,6 +23,18 @@ LayerTestResult<float, 4> MaximumBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 4> MaximumFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> MaximumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> MaximumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> MaximumUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp index eabad8f852..bf66950686 100644 --- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp @@ -96,6 +96,111 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test( output); } +LayerTestResult<armnn::Half, 4> MinimumFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape[] = { 2, 2, 2, 2 }; + + std::vector<armnn::Half> input0 = + { + 1._h, 1._h, 1._h, 1._h, 6._h, 6._h, 6._h, 6._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector<armnn::Half> input1 = + { + 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + std::vector<armnn::Half> output + { + 1._h, 1._h, 1._h, 1._h, 3._h, 3._h, 3._h, 3._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult<armnn::Half, 4> MinimumBroadcast1ElementFloat16Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector<armnn::Half> input0 = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + std::vector<armnn::Half> input1 = { 2._h }; + + std::vector<armnn::Half> output = + { + 1._h, 2._h, 2._h, 2._h, 2._h, 2._h, + 2._h, 2._h, 2._h, 2._h, 2._h, 2._h + }; + + return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<armnn::Half, 4> MinimumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector<armnn::Half> input0 = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + std::vector<armnn::Half> input1 = { 1._h, 10._h, 3._h }; + + std::vector<armnn::Half> output = + { + 1._h, 2._h, 3._h, 1._h, 5._h, 3._h, + 1._h, 8._h, 3._h, 1._h, 10._h, 3._h + }; + + return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult<int16_t, 4> MinimumInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp index bb84bc0fe2..7a33e5e817 100644 --- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp +++ 
b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -22,6 +24,18 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test( armnn::IWorkloadFactory & workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager); +LayerTestResult<armnn::Half , 4> MinimumFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> MinimumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> MinimumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<int16_t , 4> MinimumInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp index c835ff2eec..3adb797bfc 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp @@ -220,6 +220,11 @@ Rsqrt2dTest<armnn::DataType::Float32>( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2> +Rsqrt2dTest<armnn::DataType::Float16>( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2> Rsqrt2dTest<armnn::DataType::QuantisedAsymm8>( 
armnn::IWorkloadFactory& workloadFactory, @@ -235,6 +240,11 @@ Rsqrt3dTest<armnn::DataType::Float32>( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3> +Rsqrt3dTest<armnn::DataType::Float16>( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3> Rsqrt3dTest<armnn::DataType::QuantisedAsymm8>( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp index 49184edde9..c0b62aa640 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp @@ -632,6 +632,34 @@ LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test( data.inputShape, data.outputData, data.inputData); } +LayerTestResult<armnn::Half,2> SimpleSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta) +{ + return SimpleSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta); +} + +LayerTestResult<armnn::Half,3> Simple3dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta) +{ + Simple3dSoftmaxOutputData data; + return Simple3dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta, + data.inputShape, data.outputData, data.inputData); +} + +LayerTestResult<armnn::Half,4> Simple4dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta) +{ + Simple4dSoftmaxData data; + return 
Simple4dSoftmaxTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, beta, + data.inputShape, data.outputData, data.inputData); +} + LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp index 96f5fb94c8..2e5e244425 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -58,6 +60,21 @@ LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float beta); +LayerTestResult<armnn::Half,2> SimpleSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta); + +LayerTestResult<armnn::Half,3> Simple3dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta); + +LayerTestResult<armnn::Half,4> Simple4dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta); + LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp index 152ce2c06d..094ed23893 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp +++ 
b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp @@ -300,6 +300,34 @@ LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test( return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } +LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdSimpleTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdPaddingTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -356,6 +384,34 @@ LayerTestResult<float, 4> SpaceToBatchNdPaddingNhwcFloat32Test( return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::Float32>(workloadFactory, memoryManager); } +LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return 
SpaceToBatchNdSimpleNhwcTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp index 0af99c51f6..cb4d8e3c52 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp @@ -6,6 +6,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -25,6 +27,22 @@ LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -57,6 +75,22 @@ LayerTestResult<float, 4> SpaceToBatchNdPaddingNhwcFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 4> SpaceToBatchNdSimpleNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiChannelsNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdMultiBlockNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToBatchNdPaddingNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git 
a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp index 92dfd97e7a..48e157dd8d 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp @@ -169,6 +169,25 @@ LayerTestResult<uint8_t, 4> SpaceToDepthNchwAsymmQ8Test( armnn::DataLayout::NCHW); } +LayerTestResult<armnn::Half, 4> SpaceToDepthNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToDepthSimpleTest1<armnn::DataType::Float16>( + workloadFactory, + memoryManager); +} + +LayerTestResult<armnn::Half, 4> SpaceToDepthNchwFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToDepthSimpleTest1<armnn::DataType::Float16>( + workloadFactory, + memoryManager, + armnn::DataLayout::NCHW); +} + LayerTestResult<float, 4> SpaceToDepthNhwcFloat32Test1( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp index ef868295bc..80ad542077 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp @@ -6,6 +6,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -17,6 +19,14 @@ LayerTestResult<uint8_t, 4> SpaceToDepthNhwcAsymmQ8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 4> SpaceToDepthNchwFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + 
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SpaceToDepthNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<float, 4> SpaceToDepthNhwcFloat32Test1( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp index 0278bbeb0a..1716091cb9 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp @@ -314,13 +314,20 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl( } // anonymous namespace -std::vector<LayerTestResult<float,3>> SplitterFloatTest( +std::vector<LayerTestResult<float,3>> SplitterFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager); } +std::vector<LayerTestResult<armnn::Half,3>> SplitterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager); +} + std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -335,13 +342,20 @@ std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test( return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0); } -LayerTestResult<float, 3> CopyViaSplitterFloatTest( +LayerTestResult<float, 3> CopyViaSplitterFloat32Test( armnn::IWorkloadFactory& workloadFactory, const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0); } +LayerTestResult<armnn::Half, 3> CopyViaSplitterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0); +} + LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp index 34c5fbaac9..00b5f7d0a7 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp @@ -5,16 +5,26 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> #include <vector> -std::vector<LayerTestResult<float, 3>> SplitterFloatTest( +std::vector<LayerTestResult<float, 3>> SplitterFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<float, 3> CopyViaSplitterFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +std::vector<LayerTestResult<armnn::Half, 3>> SplitterFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult<float, 3> CopyViaSplitterFloatTest( +LayerTestResult<armnn::Half, 3> CopyViaSplitterFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git 
a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp index a2eb3a12cc..b091dd4ca7 100644 --- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp @@ -7,9 +7,7 @@ #include "LayerTestResult.hpp" -#include <ResolveType.hpp> - -#include <armnn/ArmNN.hpp> +#include <Half.hpp> #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp index 61225d40e5..d180021639 100644 --- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp @@ -168,6 +168,82 @@ LayerTestResult<float, 4> SubtractionBroadcastTest( output); } +LayerTestResult<armnn::Half, 4> SubtractionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 1, 2, 2 }; + const unsigned int shape1[] = { 1, 1, 2, 2 }; + + std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h }; + std::vector<armnn::Half> input1 = { 1._h, -1._h, 0._h, 2._h }; + std::vector<armnn::Half> output = { 0._h, 3._h, 3._h, 2._h }; + + return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<armnn::Half, 4> SubtractionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 1, 2, 2 }; + const unsigned int shape1[] = { 1, 1, 1, 1 }; + + 
std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h }; + + std::vector<armnn::Half> input1 = { 10._h }; + + std::vector<armnn::Half> output = { -9._h, -8._h, -7._h, -6._h }; + + return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult<armnn::Half, 4> SubtractionBroadcastFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 1, 2, 2 }; + const unsigned int shape1[] = { 1, 1, 1, 2 }; + + std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h }; + + std::vector<armnn::Half> input1 = { 10._h, -5._h }; + + std::vector<armnn::Half> output = { -9._h, 7._h, -7._h, 9._h }; + + return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult<int16_t, 4> SubtractionInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp index ca1742b77b..e154a7b37c 100644 --- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include <Half.hpp> + #include <backendsCommon/IBackendInternal.hpp> #include <backendsCommon/WorkloadFactory.hpp> @@ -22,6 +24,18 @@ LayerTestResult<float, 4> SubtractionBroadcastTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult<armnn::Half, 4> SubtractionFloat16Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SubtractionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult<armnn::Half, 4> SubtractionBroadcastFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult<uint8_t, 4> SubtractionUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); |