From 9bff14458f9950a5d31b9523c62c0bbf79a65fcf Mon Sep 17 00:00:00 2001 From: Matthew Jackson Date: Thu, 12 Sep 2019 09:08:23 +0100 Subject: IVGCVSW-3857 Add Reference FP16 workload support to remaining layers * Adds Reference FP16 support and unit tests for layers not already supported !referencetests:202156 Signed-off-by: Matthew Jackson Change-Id: I6fc9b9ce2809e163f72e27e877025c8fb85d9fbe --- src/backends/backendsCommon/WorkloadData.cpp | 2 + .../backendsCommon/test/QuantizeHelper.hpp | 18 + .../test/layerTests/BatchNormalizationTestImpl.cpp | 100 +++++- .../test/layerTests/BatchNormalizationTestImpl.hpp | 14 +- .../test/layerTests/ConcatTestImpl.cpp | 7 + .../test/layerTests/ConcatTestImpl.hpp | 5 + .../test/layerTests/DivisionTestImpl.cpp | 104 ++++++ .../test/layerTests/DivisionTestImpl.hpp | 14 + .../test/layerTests/EqualTestImpl.cpp | 97 +++++- .../test/layerTests/EqualTestImpl.hpp | 12 + .../test/layerTests/FloorTestImpl.cpp | 6 + .../test/layerTests/GatherTestImpl.cpp | 366 ++++++++++++++------- .../test/layerTests/GatherTestImpl.hpp | 20 +- .../test/layerTests/GreaterTestImpl.cpp | 113 ++++++- .../test/layerTests/GreaterTestImpl.hpp | 12 + .../test/layerTests/MaximumTestImpl.cpp | 101 ++++++ .../test/layerTests/MaximumTestImpl.hpp | 14 + .../test/layerTests/MinimumTestImpl.cpp | 105 ++++++ .../test/layerTests/MinimumTestImpl.hpp | 14 + .../test/layerTests/RsqrtTestImpl.cpp | 10 + .../test/layerTests/SoftmaxTestImpl.cpp | 28 ++ .../test/layerTests/SoftmaxTestImpl.hpp | 17 + .../test/layerTests/SpaceToBatchNdTestImpl.cpp | 56 ++++ .../test/layerTests/SpaceToBatchNdTestImpl.hpp | 34 ++ .../test/layerTests/SpaceToDepthTestImpl.cpp | 19 ++ .../test/layerTests/SpaceToDepthTestImpl.hpp | 10 + .../test/layerTests/SplitterTestImpl.cpp | 18 +- .../test/layerTests/SplitterTestImpl.hpp | 14 +- .../test/layerTests/StackTestImpl.hpp | 4 +- .../test/layerTests/SubtractionTestImpl.cpp | 76 +++++ .../test/layerTests/SubtractionTestImpl.hpp | 14 + src/backends/cl/test/ClLayerTests.cpp | 8 +- src/backends/neon/test/NeonLayerTests.cpp | 8 +- src/backends/reference/RefLayerSupport.cpp | 63 ++-- src/backends/reference/RefWorkloadFactory.cpp | 60 ---- .../reference/test/RefCreateWorkloadTests.cpp | 84 ++++- src/backends/reference/test/RefLayerTests.cpp | 129 ++++++-- .../reference/test/RefOptimizedNetworkTests.cpp | 61 ---- 38 files changed, 1533 insertions(+), 304 deletions(-) diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index e7e6d5235d..8361812697 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -1442,6 +1442,7 @@ void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const std::vector supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedSymm16 }; @@ -2197,6 +2198,7 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI const std::vector supportedInputTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp index a0c6553e24..b7ca3b34c0 100644 --- a/src/backends/backendsCommon/test/QuantizeHelper.hpp +++ b/src/backends/backendsCommon/test/QuantizeHelper.hpp @@ -8,6 +8,8 @@ #include #include +#include + #include #include #include @@ -45,6 +47,22 @@ struct SelectiveQuantizer } }; +template<> +struct SelectiveQuantizer +{ + static armnn::Half 
Quantize(float value, float scale, int32_t offset) + { + boost::ignore_unused(scale, offset); + return armnn::Half(value); + } + + static float Dequantize(armnn::Half value, float scale, int32_t offset) + { + boost::ignore_unused(scale, offset); + return value; + } +}; + template T SelectiveQuantize(float value, float scale, int32_t offset) { diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp index d8f87e15de..ef430883d4 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp @@ -198,7 +198,7 @@ LayerTestResult BatchNormTestNhwcImpl( } // anonymous namespace -LayerTestResult BatchNormFloatTest( +LayerTestResult BatchNormFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -244,7 +244,7 @@ LayerTestResult BatchNormFloatTest( armnn::DataLayout::NCHW); } -LayerTestResult BatchNormFloatNhwcTest( +LayerTestResult BatchNormFloat32NhwcTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -294,6 +294,102 @@ LayerTestResult BatchNormFloatNhwcTest( armnn::DataLayout::NHWC); } +LayerTestResult BatchNormFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + // BatchSize: 1 + // Channels: 2 + // Height: 3 + // Width: 2 + + const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 }; + std::vector inputValues + { + // Batch 0, Channel 0, Height (3) x Width (2) + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + // Batch 0, Channel 1, Height (3) x Width (2) + 1.f, 1.f, + 4.f, 1.f, + -2.f, 4.f + }; + std::vector expectedOutputValues + { + // Batch 0, Channel 0, Height (3) x Width (2) + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + // Batch 0, Channel 1, Height (3) x Width (2) + 3.f, 3.f, + 4.f, 3.f, + 2.f, 4.f + }; + + return BatchNormTestImpl( + workloadFactory, + memoryManager, + inputOutputShape, + inputValues, + expectedOutputValues, + 0.f, + 0, + armnn::DataLayout::NCHW); +} + +LayerTestResult BatchNormFloat16NhwcTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + // BatchSize: 1 + // Height: 3 + // Width: 2 + // Channels: 2 + + const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 }; + std::vector inputValues + { + // Batch 0, Height 0, Width (2) x Channel (2) + 1.f, 1.f, + 4.f, 1.f, + + // Batch 0, Height 1, Width (2) x Channel (2) + 4.f, 4.f, + 2.f, 1.f, + + // Batch 0, Height 2, Width (2) x Channel (2) + 1.f, -2.f, + 6.f, 4.f + }; + std::vector expectedOutputValues + { + // Batch 0, Height 0, Width (2) x Channel (2) + 1.f, 3.f, + 4.f, 3.f, + + // Batch 0, Height 1, Width (2) x Channel (2) + 4.f, 4.f, + 2.f, 3.f, + + // Batch 0, Height 2, Width (2) x Channel (2) + 1.f, 2.f, + 6.f, 4.f + }; + + return BatchNormTestImpl( + workloadFactory, + memoryManager, + inputOutputShape, + inputValues, + expectedOutputValues, + 0.f, + 0, + armnn::DataLayout::NHWC); +} + LayerTestResult BatchNormUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp index 200e5d8e04..a2dacde1a9 100644 
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp @@ -7,14 +7,24 @@ #include "LayerTestResult.hpp" +#include + #include #include -LayerTestResult BatchNormFloatTest( +LayerTestResult BatchNormFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult BatchNormFloat32NhwcTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult BatchNormFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult BatchNormFloatNhwcTest( +LayerTestResult BatchNormFloat16NhwcTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp index 3cfbca8441..29476e522a 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp @@ -2227,6 +2227,13 @@ LayerTestResult Concat4dDiffShapeDim3Test( workloadFactory, memoryManager, 0.0f, 0, useSubtensor); } +LayerTestResult ConcatFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return Concat3dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); +} + LayerTestResult ConcatUint8DifferentQParamsTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp index 421d03ad18..223bf190df 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.hpp @@ -8,6 +8,7 @@ #include "LayerTestResult.hpp" #include +#include #include #include @@ -22,6 +23,10 @@ LayerTestResult ConcatTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult ConcatFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult ConcatUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp index 0316ea185b..9a110a3d34 100644 --- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp @@ -154,6 +154,110 @@ LayerTestResult DivisionBroadcast1DVectorTest( output); } +LayerTestResult DivisionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int width = 2u; + const unsigned int height = 2u; + const unsigned int channelCount = 2u; + const unsigned int batchSize = 2u; + + unsigned int shape[] = { batchSize, channelCount, height, width }; + + std::vector input0 = + { + 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h, + 4._h, 
4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + std::vector input1 = + { + 1._h, 1._h, 1._h, 1._h, 2._h, 2._h, 2._h, 2._h, + 4._h, 4._h, 4._h, 4._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector output = + { + 2._h, 2._h, 2._h, 2._h, 1.50_h, 1.50_h, 1.50_h, 1.50_h, + 1._h, 1._h, 1._h, 1._h, 1.25_h, 1.25_h, 1.25_h, 1.25_h + }; + + return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult DivisionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 2, 2, 2 }; + unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector input0({ 2._h, 4._h, 6._h, 8._h, 10._h, 12._h, 14._h, 16._h}); + + std::vector input1({ 2._h }); + + std::vector output({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h}); + + return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult DivisionBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 3, 3, 2 }; + unsigned int shape1[] = { 1, 1, 1, 2 }; + + std::vector input0 = + { + 1._h, 4._h, 3._h, 8._h, 5._h, 12._h, + 7._h, 16._h, 9._h, 20._h, 11._h, 24._h, + 13._h, 28._h, 15._h, 32._h, 17._h, 36._h + }; + + std::vector input1 = { 1._h, 2._h }; + + std::vector output = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h, + 13._h, 14._h, 15._h, 16._h, 17._h, 18._h + }; + + return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult DivisionUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp index e06b494b7d..0446f8b916 100644 --- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -26,6 +28,18 @@ LayerTestResult DivisionBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult DivisionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult DivisionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult DivisionBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult DivisionUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp 
b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp index fa72136255..b0b613c137 100644 --- a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp @@ -4,9 +4,10 @@ // #include "EqualTestImpl.hpp" - #include "ElementwiseTestImpl.hpp" +#include + template<> std::unique_ptr CreateWorkload( const armnn::IWorkloadFactory& workloadFactory, @@ -98,6 +99,100 @@ LayerTestResult EqualBroadcast1DVectorTest( output); } +LayerTestResult EqualFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape[] = { 2, 2, 2, 2 }; + + // See dequantized values to the right. + std::vector input0({ 1._h, 1._h, 1._h, 1._h, 6._h, 6._h, 6._h, 6._h, + 3._h, 3._h, 3._h, 3._h, 7._h, 7._h, 7._h, 7._h }); + + std::vector input1({ 2._h, 2._h, 2._h, 2._h, 6._h, 6._h, 6._h, 6._h, + 3._h, 3._h, 3._h, 3._h, 5._h, 5._h, 5._h, 5._h }); + + std::vector output({ 0, 0, 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 0, 0, 0 }); + + return ElementwiseTestHelper<4, + armnn::EqualQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult EqualBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector input0({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h }); + + std::vector input1({ 1._h }); + + std::vector output({ 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 }); + + return ElementwiseTestHelper<4, + armnn::EqualQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult EqualBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector input0({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h }); + + std::vector input1({ 1._h, 1._h, 3._h }); + + std::vector output({ 1, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0 }); + + return ElementwiseTestHelper<4, + armnn::EqualQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult EqualUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp index e9560b38bd..3ff07ba58f 100644 --- a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp @@ -21,6 +21,18 @@ LayerTestResult EqualBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult EqualFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult EqualBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult EqualBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult EqualUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp index f97d51a22d..40ed8a20a8 100644 --- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp @@ -64,6 +64,12 @@ SimpleFloorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 4> +SimpleFloorTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + + template LayerTestResult, 4> SimpleFloorTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp index 0118f54257..5e38e48191 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp @@ -64,181 +64,317 @@ LayerTestResult GatherTestImpl( return result; } -template > -LayerTestResult Gather1dParamsTestImpl(armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +template> +struct GatherTestHelper { - armnn::TensorInfo paramsInfo({ 8 }, ArmnnType); - armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32); - armnn::TensorInfo outputInfo({ 4 }, ArmnnType); - - if (armnn::IsQuantizedType()) + static LayerTestResult Gather1dParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - paramsInfo.SetQuantizationScale(1.0f); - paramsInfo.SetQuantizationOffset(1); - outputInfo.SetQuantizationScale(1.0f); - outputInfo.SetQuantizationOffset(1); + armnn::TensorInfo paramsInfo({ 8 }, ArmnnType); + armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 4 }, ArmnnType); + + if (armnn::IsQuantizedType()) + { + paramsInfo.SetQuantizationScale(1.0f); + paramsInfo.SetQuantizationOffset(1); + outputInfo.SetQuantizationScale(1.0f); + outputInfo.SetQuantizationOffset(1); + } + const std::vector params = std::vector({ 1, 2, 3, 4, 5, 6, 7, 8 }); + const std::vector indices = std::vector({ 0, 2, 1, 5 }); + const std::vector expectedOutput = std::vector({ 1, 3, 2, 6 }); + + return GatherTestImpl( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); } - const std::vector params = std::vector({ 1, 2, 3, 4, 5, 6, 7, 8 }); - const std::vector indices = std::vector({ 0, 2, 1, 5 }); - const std::vector expectedOutput = std::vector({ 1, 3, 2, 6 }); - - return GatherTestImpl( - workloadFactory, - memoryManager, - paramsInfo, - indicesInfo, - outputInfo, - params, - indices, - expectedOutput); -} - -template > -LayerTestResult GatherMultiDimParamsTestImpl( - armnn::IWorkloadFactory& 
workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) -{ - armnn::TensorInfo paramsInfo({ 5, 2 }, ArmnnType); - armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); - armnn::TensorInfo outputInfo({ 3, 2 }, ArmnnType); - if (armnn::IsQuantizedType()) + static LayerTestResult GatherMultiDimParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - paramsInfo.SetQuantizationScale(1.0f); - paramsInfo.SetQuantizationOffset(1); - outputInfo.SetQuantizationScale(1.0f); - outputInfo.SetQuantizationOffset(1); + armnn::TensorInfo paramsInfo({ 5, 2 }, ArmnnType); + armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 3, 2 }, ArmnnType); + + if (armnn::IsQuantizedType()) + { + paramsInfo.SetQuantizationScale(1.0f); + paramsInfo.SetQuantizationOffset(1); + outputInfo.SetQuantizationScale(1.0f); + outputInfo.SetQuantizationOffset(1); + } + + const std::vector params = std::vector({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }); + const std::vector indices = std::vector({ 1, 3, 4 }); + const std::vector expectedOutput = std::vector({ 3, 4, 7, 8, 9, 10 }); + + return GatherTestImpl( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); } - const std::vector params = std::vector({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }); - const std::vector indices = std::vector({ 1, 3, 4 }); - const std::vector expectedOutput = std::vector({ 3, 4, 7, 8, 9, 10 }); - - return GatherTestImpl( - workloadFactory, - memoryManager, - paramsInfo, - indicesInfo, - outputInfo, - params, - indices, - expectedOutput); -} + static LayerTestResult GatherMultiDimParamsMultiDimIndicesTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + { + armnn::TensorInfo paramsInfo({ 3, 2, 3}, ArmnnType); + armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 2, 3, 2, 3 }, ArmnnType); + + if (armnn::IsQuantizedType()) + { + paramsInfo.SetQuantizationScale(1.0f); + paramsInfo.SetQuantizationOffset(1); + outputInfo.SetQuantizationScale(1.0f); + outputInfo.SetQuantizationOffset(1); + } + + const std::vector params = + { + 1, 2, 3, + 4, 5, 6, + + 7, 8, 9, + 10, 11, 12, + + 13, 14, 15, + 16, 17, 18 + }; + + const std::vector indices = { 1, 2, 1, 2, 1, 0 }; + + const std::vector expectedOutput = + { + 7, 8, 9, + 10, 11, 12, + 13, 14, 15, + 16, 17, 18, + 7, 8, 9, + 10, 11, 12, + + 13, 14, 15, + 16, 17, 18, + 7, 8, 9, + 10, 11, 12, + 1, 2, 3, + 4, 5, 6 + }; + + return GatherTestImpl( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); + } +}; -template > -LayerTestResult GatherMultiDimParamsMultiDimIndicesTestImpl( - armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +template +struct GatherTestHelper { - armnn::TensorInfo paramsInfo({ 3, 2, 3}, ArmnnType); - armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32); - armnn::TensorInfo outputInfo({ 2, 3, 2, 3 }, ArmnnType); - - if (armnn::IsQuantizedType()) + static LayerTestResult Gather1dParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - paramsInfo.SetQuantizationScale(1.0f); - paramsInfo.SetQuantizationOffset(1); - outputInfo.SetQuantizationScale(1.0f); 
- outputInfo.SetQuantizationOffset(1); + using namespace half_float::literal; + + armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::Float16); + armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 4 }, armnn::DataType::Float16); + + const std::vector params = std::vector({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }); + const std::vector indices = std::vector({ 0, 2, 1, 5 }); + const std::vector expectedOutput = std::vector({ 1._h, 3._h, 2._h, 6._h }); + + return GatherTestImpl( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); } - const std::vector params = + static LayerTestResult GatherMultiDimParamsTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - 1, 2, 3, - 4, 5, 6, - - 7, 8, 9, - 10, 11, 12, - - 13, 14, 15, - 16, 17, 18 - }; - - const std::vector indices = { 1, 2, 1, 2, 1, 0 }; + using namespace half_float::literal; + + armnn::TensorInfo paramsInfo({ 5, 2 }, armnn::DataType::Float16); + armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 3, 2 }, armnn::DataType::Float16); + + const std::vector params = std::vector({ 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h, 9._h, 10._h }); + + const std::vector indices = std::vector({ 1, 3, 4 }); + const std::vector expectedOutput = std::vector({ 3._h, 4._h, 7._h, 8._h, 9._h, 10._h }); + + return GatherTestImpl( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); + } - const std::vector expectedOutput = + static LayerTestResult GatherMultiDimParamsMultiDimIndicesTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - 7, 8, 9, - 10, 11, 12, - 13, 14, 15, - 16, 17, 18, - 7, 8, 9, - 10, 11, 12, - - 13, 14, 15, - 16, 17, 18, - 7, 8, 9, - 10, 11, 12, - 1, 2, 3, - 4, 5, 6 - }; - - return GatherTestImpl( - workloadFactory, - memoryManager, - paramsInfo, - indicesInfo, - outputInfo, - params, - indices, - expectedOutput); -} + using namespace half_float::literal; + + armnn::TensorInfo paramsInfo({ 3, 2, 3 }, armnn::DataType::Float16); + armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32); + armnn::TensorInfo outputInfo({ 2, 3, 2, 3 }, armnn::DataType::Float16); + + const std::vector params = + { + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h, + + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + + 13._h, 14._h, 15._h, + 16._h, 17._h, 18._h + }; + + const std::vector indices = { 1, 2, 1, 2, 1, 0 }; + + const std::vector expectedOutput = + { + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + 13._h, 14._h, 15._h, + 16._h, 17._h, 18._h, + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + + 13._h, 14._h, 15._h, + 16._h, 17._h, 18._h, + 7._h, 8._h, 9._h, + 10._h, 11._h, 12._h, + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h + }; + + return GatherTestImpl( + workloadFactory, + memoryManager, + paramsInfo, + indicesInfo, + outputInfo, + params, + indices, + expectedOutput); + } +}; } // anonymous namespace -LayerTestResult Gather1dParamsFloatTest( +LayerTestResult Gather1dParamsFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Gather1dParamsTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::Gather1dParamsTestImpl(workloadFactory, memoryManager); +} + +LayerTestResult Gather1dParamsFloat16Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return GatherTestHelper::Gather1dParamsTestImpl(workloadFactory, memoryManager); } LayerTestResult Gather1dParamsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Gather1dParamsTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::Gather1dParamsTestImpl(workloadFactory, memoryManager); } LayerTestResult Gather1dParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Gather1dParamsTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::Gather1dParamsTestImpl(workloadFactory, memoryManager); +} + +LayerTestResult GatherMultiDimParamsFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return GatherTestHelper::GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); } -LayerTestResult GatherMultiDimParamsFloatTest( +LayerTestResult GatherMultiDimParamsFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); } LayerTestResult GatherMultiDimParamsUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::GatherMultiDimParamsTestImpl( + workloadFactory, memoryManager); } LayerTestResult GatherMultiDimParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::GatherMultiDimParamsTestImpl( + workloadFactory, memoryManager); } -LayerTestResult GatherMultiDimParamsMultiDimIndicesFloatTest( +LayerTestResult GatherMultiDimParamsMultiDimIndicesFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsMultiDimIndicesTestImpl(workloadFactory, memoryManager); + return GatherTestHelper::GatherMultiDimParamsMultiDimIndicesTestImpl( + workloadFactory, memoryManager); +} + +LayerTestResult GatherMultiDimParamsMultiDimIndicesFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return GatherTestHelper::GatherMultiDimParamsMultiDimIndicesTestImpl( + workloadFactory, memoryManager); } LayerTestResult GatherMultiDimParamsMultiDimIndicesUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsMultiDimIndicesTestImpl( + return GatherTestHelper::GatherMultiDimParamsMultiDimIndicesTestImpl( workloadFactory, memoryManager); } @@ -246,6 +382,6 @@ LayerTestResult GatherMultiDimParamsMultiDimIndicesInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return GatherMultiDimParamsMultiDimIndicesTestImpl( + return GatherTestHelper::GatherMultiDimParamsMultiDimIndicesTestImpl( workloadFactory, memoryManager); } diff --git 
a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp index fd12e61e2d..33df17964b 100644 --- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp @@ -7,10 +7,16 @@ #include "LayerTestResult.hpp" +#include + #include #include -LayerTestResult Gather1dParamsFloatTest( +LayerTestResult Gather1dParamsFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult Gather1dParamsFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -22,7 +28,11 @@ LayerTestResult Gather1dParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult GatherMultiDimParamsFloatTest( +LayerTestResult GatherMultiDimParamsFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult GatherMultiDimParamsFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -34,7 +44,11 @@ LayerTestResult GatherMultiDimParamsInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult GatherMultiDimParamsMultiDimIndicesFloatTest( +LayerTestResult GatherMultiDimParamsMultiDimIndicesFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult GatherMultiDimParamsMultiDimIndicesFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp index b5bf560e3c..0148216285 100644 --- a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp @@ -4,9 +4,10 @@ // #include "GreaterTestImpl.hpp" - #include "ElementwiseTestImpl.hpp" +#include + template<> std::unique_ptr CreateWorkload( const armnn::IWorkloadFactory& workloadFactory, @@ -119,6 +120,116 @@ LayerTestResult GreaterBroadcast1DVectorTest( output); } +LayerTestResult GreaterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int width = 2u; + const unsigned int height = 2u; + const unsigned int channelCount = 2u; + const unsigned int batchSize = 2u; + + unsigned int shape[] = { batchSize, channelCount, height, width }; + + std::vector input0 = + { + 1._h, 1._h, 1._h, 1._h, 5._h, 5._h, 5._h, 5._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector input1 = + { + 1._h, 1._h, 1._h, 1._h, 3._h, 3._h, 3._h, 3._h, + 5._h, 5._h, 5._h, 5._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector output = + { + 0, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0 + }; + + return ElementwiseTestHelper<4, + armnn::GreaterQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult GreaterBroadcast1ElementFloat16Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 2, 2, 2 }; + unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector input0 = { 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; + std::vector input1 = { 1._h }; + + std::vector output = { 0, 1, 1, 1, 1, 1, 1, 1}; + + return ElementwiseTestHelper<4, + armnn::GreaterQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult GreaterBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector input0 = + { + 1.0_h, 2.9_h, 2.1_h, 4.0_h, 5.0_h, 6.0_h, + 7.0_h, 8.0_h, 9.0_h, 10.0_h, 11.0_h, 12.0_h + }; + + std::vector input1 = { 1._h, 3._h, 2._h }; + + std::vector output = + { + 0, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1 + }; + + return ElementwiseTestHelper<4, + armnn::GreaterQueueDescriptor, + armnn::DataType::Float16, + armnn::DataType::Boolean>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult GreaterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp index 39f3a39451..060fc28b9a 100644 --- a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp @@ -21,6 +21,18 @@ LayerTestResult GreaterBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult GreaterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult GreaterBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult GreaterBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult GreaterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp index d0e624d655..07e2befd66 100644 --- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp @@ -111,6 +111,107 @@ LayerTestResult MaximumBroadcast1DVectorTest( output); } +LayerTestResult MaximumFloat16Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int width = 2u; + const unsigned int height = 2u; + const unsigned int channelCount = 2u; + const unsigned int batchSize = 2u; + + unsigned int shape[] = { batchSize, channelCount, height, width }; + + std::vector input0 = + { + 
1._h, 1._h, 1._h, 1._h, 5._h, 5._h, 5._h, 5._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector input1 = + { + 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + std::vector output = + { + 2._h, 2._h, 2._h, 2._h, 5._h, 5._h, 5._h, 5._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult MaximumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape0[] = { 1, 2, 2, 2 }; + unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector input0 = { 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; + + std::vector input1 = { 2._h }; + + std::vector output = { 2._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h }; + + return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult MaximumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector input0 = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + std::vector input1 = { 1._h, 2._h, 3._h }; + + std::vector output = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult MaximumUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp index b672431c15..0c7ab47925 100644 --- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -21,6 +23,18 @@ LayerTestResult MaximumBroadcast1DVectorTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult MaximumFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult MaximumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult MaximumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult MaximumUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp index 
eabad8f852..bf66950686 100644 --- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp @@ -96,6 +96,111 @@ LayerTestResult MinimumBroadcast1DVectorUint8Test( output); } +LayerTestResult MinimumFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + unsigned int shape[] = { 2, 2, 2, 2 }; + + std::vector input0 = + { + 1._h, 1._h, 1._h, 1._h, 6._h, 6._h, 6._h, 6._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + std::vector input1 = + { + 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h, + 4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h + }; + + std::vector output + { + 1._h, 1._h, 1._h, 1._h, 3._h, 3._h, 3._h, 3._h, + 3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h + }; + + return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape, + input0, + shape, + input1, + shape, + output); +} + +LayerTestResult MinimumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector input0 = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + std::vector input1 = { 2._h }; + + std::vector output = + { + 1._h, 2._h, 2._h, 2._h, 2._h, 2._h, + 2._h, 2._h, 2._h, 2._h, 2._h, 2._h + }; + + return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult MinimumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 2, 2, 3 }; + const unsigned int shape1[] = { 1, 1, 1, 3 }; + + std::vector input0 = + { + 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h, 10._h, 11._h, 12._h + }; + + std::vector input1 = { 1._h, 10._h, 3._h }; + + std::vector output = + { + 1._h, 2._h, 3._h, 1._h, 5._h, 3._h, + 1._h, 8._h, 3._h, 1._h, 10._h, 3._h + }; + + return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult MinimumInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp index bb84bc0fe2..7a33e5e817 100644 --- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -22,6 +24,18 @@ LayerTestResult MinimumBroadcast1DVectorUint8Test( armnn::IWorkloadFactory & workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager); +LayerTestResult MinimumFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult 
MinimumBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult MinimumBroadcast1DVectorFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult MinimumInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp index c835ff2eec..3adb797bfc 100644 --- a/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp @@ -220,6 +220,11 @@ Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 2> +Rsqrt2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 2> Rsqrt2dTest( armnn::IWorkloadFactory& workloadFactory, @@ -235,6 +240,11 @@ Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +template LayerTestResult, 3> +Rsqrt3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + template LayerTestResult, 3> Rsqrt3dTest( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp index 49184edde9..c0b62aa640 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp @@ -632,6 +632,34 @@ LayerTestResult Simple4dSoftmaxUint8Test( data.inputShape, data.outputData, data.inputData); } +LayerTestResult SimpleSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta) +{ + return SimpleSoftmaxTestImpl(workloadFactory, memoryManager, beta); +} + +LayerTestResult Simple3dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta) +{ + Simple3dSoftmaxOutputData data; + return Simple3dSoftmaxTestImpl(workloadFactory, memoryManager, beta, + data.inputShape, data.outputData, data.inputData); +} + +LayerTestResult Simple4dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta) +{ + Simple4dSoftmaxData data; + return Simple4dSoftmaxTestImpl(workloadFactory, memoryManager, beta, + data.inputShape, data.outputData, data.inputData); +} + LayerTestResult SimpleSoftmaxUint16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp index 96f5fb94c8..2e5e244425 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -58,6 +60,21 @@ LayerTestResult 
Simple4dSoftmaxUint8Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float beta); +LayerTestResult SimpleSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta); + +LayerTestResult Simple3dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta); + +LayerTestResult Simple4dSoftmaxFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + float beta); + LayerTestResult SimpleSoftmaxUint16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp index 152ce2c06d..094ed23893 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp @@ -300,6 +300,34 @@ LayerTestResult SpaceToBatchNdPaddingFloat32Test( return SpaceToBatchNdPaddingTest(workloadFactory, memoryManager); } +LayerTestResult SpaceToBatchNdSimpleFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdSimpleTest(workloadFactory, memoryManager); +} + +LayerTestResult SpaceToBatchNdMultiChannelsFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiChannelsTest(workloadFactory, memoryManager); +} + +LayerTestResult SpaceToBatchNdMultiBlockFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiBlockTest(workloadFactory, memoryManager); +} + +LayerTestResult SpaceToBatchNdPaddingFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdPaddingTest(workloadFactory, memoryManager); +} + LayerTestResult SpaceToBatchNdSimpleUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -356,6 +384,34 @@ LayerTestResult SpaceToBatchNdPaddingNhwcFloat32Test( return SpaceToBatchNdPaddingNhwcTest(workloadFactory, memoryManager); } +LayerTestResult SpaceToBatchNdSimpleNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdSimpleNhwcTest(workloadFactory, memoryManager); +} + +LayerTestResult SpaceToBatchNdMultiChannelsNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiChannelsNhwcTest(workloadFactory, memoryManager); +} + +LayerTestResult SpaceToBatchNdMultiBlockNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToBatchNdMultiBlockNhwcTest(workloadFactory, memoryManager); +} + +LayerTestResult SpaceToBatchNdPaddingNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return 
SpaceToBatchNdPaddingNhwcTest(workloadFactory, memoryManager); +} + LayerTestResult SpaceToBatchNdSimpleNhwcUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp index 0af99c51f6..cb4d8e3c52 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp @@ -6,6 +6,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -25,6 +27,22 @@ LayerTestResult SpaceToBatchNdPaddingFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult SpaceToBatchNdSimpleFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToBatchNdMultiChannelsFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToBatchNdMultiBlockFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToBatchNdPaddingFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult SpaceToBatchNdSimpleUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); @@ -57,6 +75,22 @@ LayerTestResult SpaceToBatchNdPaddingNhwcFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult SpaceToBatchNdSimpleNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToBatchNdMultiChannelsNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToBatchNdMultiBlockNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToBatchNdPaddingNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult SpaceToBatchNdSimpleNhwcUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp index 92dfd97e7a..48e157dd8d 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp @@ -169,6 +169,25 @@ LayerTestResult SpaceToDepthNchwAsymmQ8Test( armnn::DataLayout::NCHW); } +LayerTestResult SpaceToDepthNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToDepthSimpleTest1( + workloadFactory, + memoryManager); +} + +LayerTestResult SpaceToDepthNchwFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SpaceToDepthSimpleTest1( + workloadFactory, + memoryManager, + armnn::DataLayout::NCHW); +} + LayerTestResult SpaceToDepthNhwcFloat32Test1( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp index ef868295bc..80ad542077 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp @@ -6,6 +6,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -17,6 +19,14 @@ LayerTestResult SpaceToDepthNhwcAsymmQ8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult SpaceToDepthNchwFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SpaceToDepthNhwcFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult SpaceToDepthNhwcFloat32Test1( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp index 0278bbeb0a..1716091cb9 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp @@ -314,13 +314,20 @@ LayerTestResult CopyViaSplitterTestImpl( } // anonymous namespace -std::vector> SplitterFloatTest( +std::vector> SplitterFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { return SplitterTestCommon(workloadFactory, memoryManager); } +std::vector> SplitterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return SplitterTestCommon(workloadFactory, memoryManager); +} + std::vector> SplitterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -335,13 +342,20 @@ std::vector> SplitterInt16Test( return SplitterTestCommon(workloadFactory, memoryManager, 1.0f, 0); } -LayerTestResult CopyViaSplitterFloatTest( +LayerTestResult CopyViaSplitterFloat32Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { return CopyViaSplitterTestImpl(workloadFactory, memoryManager, 0.0f, 0); } +LayerTestResult CopyViaSplitterFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return CopyViaSplitterTestImpl(workloadFactory, memoryManager, 0.0f, 0); +} + LayerTestResult CopyViaSplitterUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp index 34c5fbaac9..00b5f7d0a7 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp +++ 
b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.hpp @@ -5,16 +5,26 @@ #include "LayerTestResult.hpp" +#include + #include #include #include -std::vector> SplitterFloatTest( +std::vector> SplitterFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult CopyViaSplitterFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +std::vector> SplitterFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -LayerTestResult CopyViaSplitterFloatTest( +LayerTestResult CopyViaSplitterFloat16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp index a2eb3a12cc..b091dd4ca7 100644 --- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.hpp @@ -7,9 +7,7 @@ #include "LayerTestResult.hpp" -#include - -#include +#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp index 61225d40e5..d180021639 100644 --- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp @@ -168,6 +168,82 @@ LayerTestResult SubtractionBroadcastTest( output); } +LayerTestResult SubtractionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 1, 2, 2 }; + const unsigned int shape1[] = { 1, 1, 2, 2 }; + + std::vector input0 = { 1._h, 2._h, 3._h, 4._h }; + std::vector input1 = { 1._h, -1._h, 0._h, 2._h }; + std::vector output = { 0._h, 3._h, 3._h, 2._h }; + + return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult SubtractionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 1, 2, 2 }; + const unsigned int shape1[] = { 1, 1, 1, 1 }; + + std::vector input0 = { 1._h, 2._h, 3._h, 4._h }; + + std::vector input1 = { 10._h }; + + std::vector output = { -9._h, -8._h, -7._h, -6._h }; + + return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float16>( + workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + +LayerTestResult SubtractionBroadcastFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + using namespace half_float::literal; + + const unsigned int shape0[] = { 1, 1, 2, 2 }; + const unsigned int shape1[] = { 1, 1, 1, 2 }; + + std::vector input0 = { 1._h, 2._h, 3._h, 4._h }; + + std::vector input1 = { 10._h, -5._h }; + + std::vector output = { -9._h, 7._h, -7._h, 9._h }; + + return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float16>( + 
workloadFactory, + memoryManager, + shape0, + input0, + shape1, + input1, + shape0, + output); +} + LayerTestResult SubtractionInt16Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp index ca1742b77b..e154a7b37c 100644 --- a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp @@ -7,6 +7,8 @@ #include "LayerTestResult.hpp" +#include + #include #include @@ -22,6 +24,18 @@ LayerTestResult SubtractionBroadcastTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult SubtractionFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SubtractionBroadcast1ElementFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +LayerTestResult SubtractionBroadcastFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + LayerTestResult SubtractionUint8Test( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index 7274afc957..b3f29b6380 100644 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -177,10 +177,10 @@ ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetricNhwc, ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test); // Splitter -ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat, SplitterFloatTest) +ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat32, SplitterFloat32Test) ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) -ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat, CopyViaSplitterFloatTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test) ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) // Concat @@ -287,8 +287,8 @@ ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadca ARMNN_AUTO_TEST_CASE(Multiplication5d, Multiplication5dTest) // Batch Norm -ARMNN_AUTO_TEST_CASE(BatchNormFloat, BatchNormFloatTest) -ARMNN_AUTO_TEST_CASE(BatchNormFloatNhwc, BatchNormFloatNhwcTest) +ARMNN_AUTO_TEST_CASE(BatchNormFloat32, BatchNormFloat32Test) +ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest) // L2 Normalization ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW) diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp index 056656769f..8a513347cf 100644 --- a/src/backends/neon/test/NeonLayerTests.cpp +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -380,10 +380,10 @@ ARMNN_AUTO_TEST_CASE(SpaceToDepthNhwcQSymm16, SpaceToDepthNhwcQSymm16Test) ARMNN_AUTO_TEST_CASE(SpaceToDepthNchwQSymm16, SpaceToDepthNchwQSymm16Test) // Splitter -ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat, SplitterFloatTest) +ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat32, SplitterFloat32Test) ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) -ARMNN_AUTO_TEST_CASE(CopyViaSplitteFloatr, CopyViaSplitterFloatTest) 
+ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test) ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) // Concat @@ -425,8 +425,8 @@ ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadca ARMNN_AUTO_TEST_CASE(Multiplication5d, Multiplication5dTest) // Batch Norm -ARMNN_AUTO_TEST_CASE(BatchNormFloat, BatchNormFloatTest) -ARMNN_AUTO_TEST_CASE(BatchNormFloatNhwc, BatchNormFloatNhwcTest) +ARMNN_AUTO_TEST_CASE(BatchNormFloat32, BatchNormFloat32Test) +ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest) // Constant ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 4958968175..465d45cbae 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -70,9 +70,10 @@ bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo& Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -202,9 +203,10 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input, { ignore_unused(descriptor); - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -249,9 +251,10 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, std::string outputTensorStr = "output"; // Define supported types. - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -290,9 +293,10 @@ bool RefLayerSupport::IsConcatSupported(const std::vector inp ignore_unused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -556,8 +560,9 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -590,9 +595,10 @@ bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -637,9 +643,10 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input, ignore_unused(output); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedSymm16 }; @@ -716,9 +723,10 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0, armnn::Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -745,9 +753,10 @@ bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -923,8 +932,9 @@ bool RefLayerSupport::IsMaximumSupported(const 
TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1053,8 +1063,9 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1288,9 +1299,10 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1313,9 +1325,10 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1337,9 +1350,10 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1366,9 +1380,10 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input, { ignore_unused(output); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1392,9 +1407,10 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input, { ignore_unused(output); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1420,9 +1436,10 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, ignore_unused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1445,9 +1462,10 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, { ignore_unused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1465,9 +1483,10 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, { ignore_unused(descriptor); bool supported = true; - std::array supportedTypes = + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1551,8 +1570,9 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0, { bool supported = true; - std::array supportedTypes = { + std::array supportedTypes = { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; @@ -1585,9 +1605,10 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input, { bool supported = true; - std::array supportedTypes + std::array supportedTypes { DataType::Float32, + DataType::Float16, DataType::QuantisedAsymm8, DataType::QuantisedSymm16 }; diff --git a/src/backends/reference/RefWorkloadFactory.cpp 
b/src/backends/reference/RefWorkloadFactory.cpp index a3d4bf08c2..52dffcc1f8 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -137,20 +137,12 @@ std::unique_ptr RefWorkloadFactory::CreateActivation(const Activation std::unique_ptr RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } std::unique_ptr RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -248,10 +240,6 @@ std::unique_ptr RefWorkloadFactory::CreateMemImport(const MemI std::unique_ptr RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -283,10 +271,6 @@ std::unique_ptr RefWorkloadFactory::CreateL2Normalization(const L2Nor std::unique_ptr RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -305,20 +289,12 @@ std::unique_ptr RefWorkloadFactory::CreateReshape(const ReshapeQueueD std::unique_ptr RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } std::unique_ptr RefWorkloadFactory::CreateSpaceToDepth(const armnn::SpaceToDepthQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -351,30 +327,18 @@ std::unique_ptr RefWorkloadFactory::CreateConvertFp32ToFp16( std::unique_ptr RefWorkloadFactory::CreateDivision( const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } std::unique_ptr RefWorkloadFactory::CreateSubtraction( const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } std::unique_ptr RefWorkloadFactory::CreateMaximum( const MaximumQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -387,10 +351,6 @@ std::unique_ptr RefWorkloadFactory::CreateMean( std::unique_ptr RefWorkloadFactory::CreateMinimum( const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -417,10 +377,6 @@ std::unique_ptr RefWorkloadFactory::CreateEqual(const EqualQueueDescr std::unique_ptr RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -449,20 +405,12 @@ 
std::unique_ptr RefWorkloadFactory::CreateDebug(const DebugQueueDescr std::unique_ptr RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } std::unique_ptr RefWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -487,10 +435,6 @@ std::unique_ptr RefWorkloadFactory::CreateDequantize(const Dequantize std::unique_ptr RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } @@ -510,10 +454,6 @@ std::unique_ptr RefWorkloadFactory::CreateStack(const StackQueueDescr std::unique_ptr RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) const { - if (IsFloat16(info)) - { - return MakeWorkload(descriptor, info); - } return std::make_unique(descriptor, info); } diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index 04c9acbe39..580d8550f0 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -112,7 +112,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload) armnn::DataType::QuantisedSymm16>(); } -BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload) +BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload) { RefCreateElementwiseWorkloadTest(); } +BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload) +{ + RefCreateElementwiseWorkloadTest(); +} + BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload) { RefCreateElementwiseWorkloadTest(); } -BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload) +BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload) { RefCreateElementwiseWorkloadTest(); } +BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload) +{ + RefCreateElementwiseWorkloadTest(); +} + BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload) { RefCreateElementwiseWorkloadTest + (DataLayout::NCHW); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc) +{ + RefCreateBatchNormalizationWorkloadTest + (DataLayout::NHWC); +} + BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload) { RefCreateBatchNormalizationWorkloadTest @@ -486,6 +514,11 @@ BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload) RefCreateSoftmaxWorkloadTest(); } +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload) +{ + RefCreateSoftmaxWorkloadTest(); +} + BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload) { RefCreateSoftmaxWorkloadTest(); @@ -523,6 +556,11 @@ BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload) RefCreateSplitterWorkloadTest(); } +BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload) +{ + RefCreateSplitterWorkloadTest(); +} + BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload) { RefCreateSplitterWorkloadTest(); @@ -566,6 +604,11 @@ BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32) RefCreateSplitterConcatWorkloadTest(); } +BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16) +{ + RefCreateSplitterConcatWorkloadTest(); +} + BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8) { RefCreateSplitterConcatWorkloadTest(); @@ -654,6 +697,11 @@ BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32) RefCreateResizeBilinearTest(DataLayout::NCHW); 
} +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16) +{ + RefCreateResizeBilinearTest(DataLayout::NCHW); +} + BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8) { RefCreateResizeBilinearTest(DataLayout::NCHW); @@ -689,6 +737,11 @@ BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32) RefCreateRsqrtTest(); } +BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16) +{ + RefCreateRsqrtTest(); +} + BOOST_AUTO_TEST_CASE(CreateRsqrtUint8) { RefCreateRsqrtTest(); @@ -717,6 +770,11 @@ BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32) RefCreateBatchToSpaceNdTest(); } +BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16) +{ + RefCreateBatchToSpaceNdTest(); +} + BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8) { RefCreateBatchToSpaceNdTest(); @@ -833,6 +891,11 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload) RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); } +BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload) +{ + RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); +} + BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload) { RefCreateConcatWorkloadTest({ 4, 3, 2, 5 }, 0); @@ -931,6 +994,11 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload) RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32); } +BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload) +{ + RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16); +} + BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload) { RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedAsymm8); @@ -948,6 +1016,13 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload) armnn::InvalidArgumentException); } +BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload) +{ + BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, + armnn::DataType::Float16), + armnn::InvalidArgumentException); +} + BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload) { BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, @@ -980,6 +1055,11 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32) RefCreateSpaceToDepthWorkloadTest(); } +BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16) +{ + RefCreateSpaceToDepthWorkloadTest(); +} + BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8) { RefCreateSpaceToDepthWorkloadTest(); diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index da036a6758..439ac49121 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -362,6 +362,10 @@ ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint8, Simple3dSoftmaxUint8Test, 1.0f) ARMNN_AUTO_TEST_CASE(Simple4dSoftmax, Simple4dSoftmaxTest, 1.0f) ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint8, Simple4dSoftmaxUint8Test, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxFloat16, SimpleSoftmaxFloat16Test, 1.0f) +ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxFloat16, Simple3dSoftmaxFloat16Test, 1.0f) +ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxFloat16, Simple4dSoftmaxFloat16Test, 1.0f) + ARMNN_AUTO_TEST_CASE(SimpleSoftmaxUint16, SimpleSoftmaxUint16Test, 1.0f) ARMNN_AUTO_TEST_CASE(Simple3dSoftmaxUint16, Simple3dSoftmaxUint16Test, 1.0f) ARMNN_AUTO_TEST_CASE(Simple4dSoftmaxUint16, Simple4dSoftmaxUint16Test, 1.0f) @@ -452,16 +456,19 @@ ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) // Splitter -ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat, 
SplitterFloatTest) +ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat32, SplitterFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleSplitterFloat16, SplitterFloat16Test) ARMNN_AUTO_TEST_CASE(SimpleSplitterUint8, SplitterUint8Test) ARMNN_AUTO_TEST_CASE(SimpleSplitterInt16, SplitterInt16Test) -ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat, CopyViaSplitterFloatTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterFloat16, CopyViaSplitterFloat16Test) ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) ARMNN_AUTO_TEST_CASE(CopyViaSplitterInt16, CopyViaSplitterInt16Test) // Concat ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest) +ARMNN_AUTO_TEST_CASE(ConcatFloat16, ConcatFloat16Test) ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test) ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest) ARMNN_AUTO_TEST_CASE(ConcatUint16, ConcatUint16Test) @@ -489,6 +496,10 @@ ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest) ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest) ARMNN_AUTO_TEST_CASE(SubBroadcast, SubtractionBroadcastTest) +ARMNN_AUTO_TEST_CASE(SimpleSubFloat16, SubtractionTest) +ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementFloat16, SubtractionBroadcast1ElementTest) +ARMNN_AUTO_TEST_CASE(SubBroadcastFloat16, SubtractionBroadcastTest) + ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test) ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test) ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test) @@ -502,6 +513,11 @@ ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest) ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest) ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest) ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest) + +ARMNN_AUTO_TEST_CASE(DivisionFloat16, DivisionFloat16Test) +ARMNN_AUTO_TEST_CASE(DivisionFloat16Broadcast1Element, DivisionBroadcast1ElementFloat16Test) +ARMNN_AUTO_TEST_CASE(DivisionFloat16Broadcast1DVector, DivisionBroadcast1DVectorFloat16Test) + // NOTE: division by zero for quantized div needs more attention // see IVGCVSW-1849 ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test) @@ -516,6 +532,9 @@ ARMNN_AUTO_TEST_CASE(DivisionInt16Broadcast1DVector, DivisionBroadcast1DVectorIn ARMNN_AUTO_TEST_CASE(SimpleEqual, EqualSimpleTest) ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest) ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVector, EqualBroadcast1DVectorTest) +ARMNN_AUTO_TEST_CASE(EqualFloat16, EqualFloat16Test) +ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test) +ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorFloat16, EqualBroadcast1DVectorFloat16Test) ARMNN_AUTO_TEST_CASE(EqualUint8, EqualUint8Test) ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test) ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorUint8, EqualBroadcast1DVectorUint8Test) @@ -527,11 +546,17 @@ ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVector, GreaterBroadcast1DVectorTest) ARMNN_AUTO_TEST_CASE(GreaterUint8, GreaterUint8Test) ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test) ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorUint8, GreaterBroadcast1DVectorUint8Test) +ARMNN_AUTO_TEST_CASE(GreaterFloat16, GreaterFloat16Test) +ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test) +ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorFloat16, 
GreaterBroadcast1DVectorFloat16Test) // Max ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest) ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest) ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest) +ARMNN_AUTO_TEST_CASE(MaximumFloat16, MaximumFloat16Test) +ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementFloat16, MaximumBroadcast1ElementFloat16Test) +ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorFloat16, MaximumBroadcast1DVectorFloat16Test) ARMNN_AUTO_TEST_CASE(MaximumUint8, MaximumUint8Test) ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test) ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test) @@ -543,6 +568,9 @@ ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorInt16, MaximumBroadcast1DVectorInt1 ARMNN_AUTO_TEST_CASE(SimpleMinimum1, MinimumBroadcast1ElementTest1) ARMNN_AUTO_TEST_CASE(SimpleMinimum2, MinimumBroadcast1ElementTest2) ARMNN_AUTO_TEST_CASE(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test) +ARMNN_AUTO_TEST_CASE(MinimumFloat16, MinimumFloat16Test) +ARMNN_AUTO_TEST_CASE(MinimumBroadcast1ElementFloat16, MinimumBroadcast1ElementFloat16Test) +ARMNN_AUTO_TEST_CASE(MinimumBroadcast1DVectorFloat16, MinimumBroadcast1DVectorFloat16Test) ARMNN_AUTO_TEST_CASE(MinimumInt16, MinimumInt16Test) ARMNN_AUTO_TEST_CASE(MinimumBroadcast1ElementInt16, MinimumBroadcast1ElementInt16Test) ARMNN_AUTO_TEST_CASE(MinimumBroadcast1DVectorInt16, MinimumBroadcast1DVectorInt16Test) @@ -560,8 +588,10 @@ ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorInt16, MultiplicationBroadca ARMNN_AUTO_TEST_CASE(Multiplication5d, Multiplication5dTest) // Batch Norm -ARMNN_AUTO_TEST_CASE(BatchNormFloat, BatchNormFloatTest) -ARMNN_AUTO_TEST_CASE(BatchNormFloatNhwc, BatchNormFloatNhwcTest) +ARMNN_AUTO_TEST_CASE(BatchNormFloat32, BatchNormFloat32Test) +ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest) +ARMNN_AUTO_TEST_CASE(BatchNormFloat16, BatchNormFloat16Test) +ARMNN_AUTO_TEST_CASE(BatchNormFloat16Nhwc, BatchNormFloat16NhwcTest) ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test) ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest) ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test) @@ -571,6 +601,9 @@ ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16, + SimpleResizeBilinearTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearTest, DataLayout::NCHW) @@ -580,6 +613,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16, ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16, + ResizeBilinearNopTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopTest, DataLayout::NCHW) @@ -589,6 +625,9 @@ ARMNN_AUTO_TEST_CASE(esizeBilinearNopUint16, ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16, + ResizeBilinearSqMinTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinTest, DataLayout::NCHW) @@ -598,6 +637,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16, ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16, + ResizeBilinearMinTest, + DataLayout::NCHW) 
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinTest, DataLayout::NCHW) @@ -607,6 +649,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16, ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16, + ResizeBilinearMagTest, + DataLayout::NCHW) ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagTest, DataLayout::NCHW) @@ -618,6 +663,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16, ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16, + ResizeBilinearNopTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc, ResizeBilinearNopTest, DataLayout::NHWC) @@ -627,6 +675,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint16Nhwc, ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16, + SimpleResizeBilinearTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc, SimpleResizeBilinearTest, DataLayout::NHWC) @@ -636,6 +687,9 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16, + ResizeBilinearSqMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc, ResizeBilinearSqMinTest, DataLayout::NHWC) @@ -645,6 +699,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16, + ResizeBilinearMinTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc, ResizeBilinearMinTest, DataLayout::NHWC) @@ -654,6 +711,9 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16Nhwc, ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16, + ResizeBilinearMagTest, + DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc, ResizeBilinearMagTest, DataLayout::NHWC) @@ -863,6 +923,7 @@ ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, // Floor ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) +ARMNN_AUTO_TEST_CASE(SimpleFloorFloat16, SimpleFloorTest) ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest) // Reshape @@ -876,6 +937,8 @@ ARMNN_AUTO_TEST_CASE(Rsqrt2d, Rsqrt2dTest) ARMNN_AUTO_TEST_CASE(Rsqrt3d, Rsqrt3dTest) ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest) ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest) +ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest) +ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest) ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest) ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest) ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest) @@ -964,6 +1027,11 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsFloat32, SpaceToBatchNdMultiChan ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockFloat32, SpaceToBatchNdMultiBlockFloat32Test) ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingFloat32, SpaceToBatchNdPaddingFloat32Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleFloat16, SpaceToBatchNdSimpleFloat16Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsFloat16, SpaceToBatchNdMultiChannelsFloat16Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockFloat16, SpaceToBatchNdMultiBlockFloat16Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingFloat16, 
SpaceToBatchNdPaddingFloat16Test) + ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleUint8, SpaceToBatchNdSimpleUint8Test) ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsUint8, SpaceToBatchNdMultiChannelsUint8Test) ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockUint8, SpaceToBatchNdMultiBlockUint8Test) @@ -974,6 +1042,11 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNhwcFloat32, SpaceToBatchNdMulti ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcFloat32, SpaceToBatchNdMultiBlockNhwcFloat32Test) ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNhwcFloat32, SpaceToBatchNdPaddingNhwcFloat32Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleNhwcFloat16, SpaceToBatchNdSimpleNhwcFloat16Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNhwcFloat16, SpaceToBatchNdMultiChannelsNhwcFloat16Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcFloat16, SpaceToBatchNdMultiBlockNhwcFloat16Test) +ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNhwcFloat16, SpaceToBatchNdPaddingNhwcFloat16Test) + ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleNhwcUint8, SpaceToBatchNdSimpleNhwcUint8Test) ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsNhwcUint8, SpaceToBatchNdMultiChannelsNhwcUint8Test) ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcUint8, SpaceToBatchNdMultiBlockNhwcUint8Test) @@ -990,13 +1063,21 @@ ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiBlockNhwcUint16, SpaceToBatchNdMultiBloc ARMNN_AUTO_TEST_CASE(SpaceToBatchNdPaddingNhwcUint16, SpaceToBatchNdPaddingNhwcUint16Test) // BatchToSpace -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat1, BatchToSpaceNdNhwcTest1) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat2, BatchToSpaceNdNhwcTest2) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat3, BatchToSpaceNdNhwcTest3) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat4, BatchToSpaceNdNhwcTest4) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat5, BatchToSpaceNdNhwcTest5) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat6, BatchToSpaceNdNhwcTest6) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat7, BatchToSpaceNdNhwcTest7) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_1, BatchToSpaceNdNhwcTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_2, BatchToSpaceNdNhwcTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_3, BatchToSpaceNdNhwcTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_4, BatchToSpaceNdNhwcTest4) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_5, BatchToSpaceNdNhwcTest5) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_6, BatchToSpaceNdNhwcTest6) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat32_7, BatchToSpaceNdNhwcTest7) + +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_1, BatchToSpaceNdNhwcTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_2, BatchToSpaceNdNhwcTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_3, BatchToSpaceNdNhwcTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_4, BatchToSpaceNdNhwcTest4) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2) @@ -1014,13 +1095,13 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5, BatchToSpaceNdNhwcTest5) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7, BatchToSpaceNdNhwcTest7) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat1, BatchToSpaceNdNchwTest1) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat2, BatchToSpaceNdNchwTest2) 
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat3, BatchToSpaceNdNchwTest3) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat4, BatchToSpaceNdNchwTest4) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat5, BatchToSpaceNdNchwTest5) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat6, BatchToSpaceNdNchwTest6) -ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat7, BatchToSpaceNdNchwTest7) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_1, BatchToSpaceNdNchwTest1) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_2, BatchToSpaceNdNchwTest2) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_3, BatchToSpaceNdNchwTest3) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_4, BatchToSpaceNdNchwTest4) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6) +ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1) ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2) @@ -1100,13 +1181,16 @@ ARMNN_AUTO_TEST_CASE(Debug2dQSymm16, Debug2dInt16Test) ARMNN_AUTO_TEST_CASE(Debug1dQSymm16, Debug1dInt16Test) // Gather -ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat, Gather1dParamsFloatTest) +ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test) +ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat16, Gather1dParamsFloat16Test) ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test) ARMNN_AUTO_TEST_CASE(Gather1dParamsInt16, Gather1dParamsInt16Test) -ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat, GatherMultiDimParamsFloatTest) +ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test) +ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat16, GatherMultiDimParamsFloat16Test) ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test) ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsInt16, GatherMultiDimParamsInt16Test) -ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat, GatherMultiDimParamsMultiDimIndicesFloatTest) +ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat32, GatherMultiDimParamsMultiDimIndicesFloat32Test) +ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat16, GatherMultiDimParamsMultiDimIndicesFloat16Test) ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesUint8, GatherMultiDimParamsMultiDimIndicesUint8Test) ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimParamsMultiDimIndicesInt16Test) @@ -1114,6 +1198,8 @@ ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimPar ARMNN_AUTO_TEST_CASE(Abs2d, Abs2dTest) ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest) ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest) +ARMNN_AUTO_TEST_CASE(Abs2dFloat16, Abs2dTest) +ARMNN_AUTO_TEST_CASE(Abs3dFloat16, Abs3dTest) ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest) ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest) ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest) @@ -1161,6 +1247,7 @@ ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test) // PReLU ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest) +ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest) ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest) ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest) diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp index 1a29e73af8..cd3708cc4f 100644 --- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp +++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp @@ -149,67 
+149,6 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer) } } -BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef) -{ - // Test to check when FP16 Turbo mode set - // it converts the FP32 network to FP16 Network - // add FP32ToFP16 conversion layer after the InputLayer - // add FP16ToFP32 conversion layer after the OutputLayer - // checks the other layers if they are supported in FP16 - // if they are not put the conversion layers before and after - // if they are not supported in FP16 use FP32 instead - // if there are inverse conversion layers remove them with optimization - // at the moment FloorLayer is not supported in FP16 so it rolls back to FP32 - // and inverse conversion layers are removed by the optimizer - armnn::Network net; - - // Defines layers. - auto input = net.AddInputLayer(0); - auto floor = net.AddFloorLayer(); - auto output = net.AddOutputLayer(0); - - // Connects layers. - input->GetOutputSlot(0).Connect(floor->GetInputSlot(0)); - floor->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - - armnn::TensorShape shape({4}); - armnn::TensorInfo info(shape, armnn::DataType::Float32); - input->GetOutputSlot(0).SetTensorInfo(info); - floor->GetOutputSlot(0).SetTensorInfo(info); - - armnn::IRuntime::CreationOptions options; - armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); - - std::vector backends = {armnn::Compute::CpuRef}; - - armnn::OptimizerOptions optimizerOptions; - optimizerOptions.m_ReduceFp32ToFp16 = true; - - armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(), - optimizerOptions); - - std::ostringstream ss; - optimizedNet->SerializeToDot(ss); - - auto inputId = input->GetGuid(); - auto floorId = floor->GetGuid(); - auto outputId = output->GetGuid(); - - std::stringstream expected; - expected << - "digraph Optimized {\n" - " node [shape=\"record\"];\n" - " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n" - " " << inputId << " [label=\"{Input|LayerType : Input\\lBackendID : CpuRef\\l}\"];\n" - " " << floorId << " [label=\"{Floor|LayerType : Floor\\lBackendID : CpuRef\\l}\"];\n" - " " << outputId << " [label=\"{Output|LayerType : Output\\lBackendID : CpuRef\\l}\"];\n" - " " << inputId << " -> " << floorId << " [label=< [4] >];\n" - " " << floorId << " -> " << outputId << " [label=< [4] >];\n" - "}\n"; - - BOOST_TEST(ss.str() == expected.str()); -} - BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef) { armnn::Network net; -- cgit v1.2.1
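
The FP16 additions above follow two recurring patterns. First, each RefLayerSupport check widens its supportedTypes array by one entry to admit DataType::Float16. Second, the new unit tests build their data with half_float literals so the expected values are exact in FP16. The following is a minimal sketch of both patterns, not part of the patch; it assumes armnn::Half is the half_float::half alias provided by Half.hpp, and the helper names IsFp16Accepted/MakeFp16TestInput are illustrative only.

    // Sketch only: the two FP16 patterns used throughout this patch.
    #include <array>
    #include <vector>

    #include <armnn/Types.hpp>   // armnn::DataType
    #include <Half.hpp>          // armnn::Half (assumed alias of half_float::half)

    // 1) Layer support checks accept Float16 by widening the supported-type list.
    bool IsFp16Accepted(armnn::DataType type)
    {
        const std::array<armnn::DataType, 4> supportedTypes =
        {
            armnn::DataType::Float32,
            armnn::DataType::Float16,          // entry added by this patch
            armnn::DataType::QuantisedAsymm8,
            armnn::DataType::QuantisedSymm16
        };

        for (auto supported : supportedTypes)
        {
            if (supported == type)
            {
                return true;
            }
        }
        return false;
    }

    // 2) FP16 test vectors are written with half literals so values are exact.
    std::vector<armnn::Half> MakeFp16TestInput()
    {
        using namespace half_float::literal;   // enables the _h suffix
        return { 1._h, 2._h, 3._h, 4._h };     // example values, as in SubtractionFloat16Test
    }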