From 9bff14458f9950a5d31b9523c62c0bbf79a65fcf Mon Sep 17 00:00:00 2001
From: Matthew Jackson
Date: Thu, 12 Sep 2019 09:08:23 +0100
Subject: IVGCVSW-3857 Add Reference FP16 workload support to remaining layers

* Adds Reference FP16 support and unit tests for layers not already supported

!referencetests:202156

Signed-off-by: Matthew Jackson
Change-Id: I6fc9b9ce2809e163f72e27e877025c8fb85d9fbe
---
 .../test/layerTests/MaximumTestImpl.cpp | 101 +++++++++++++++++++++
 1 file changed, 101 insertions(+)

(limited to 'src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp')

diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index d0e624d655..07e2befd66 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -111,6 +111,107 @@ LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
         output);
 }
 
+LayerTestResult<armnn::Half, 4> MaximumFloat16Test(armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    using namespace half_float::literal;
+
+    const unsigned int width = 2u;
+    const unsigned int height = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<armnn::Half> input0 =
+    {
+        1._h, 1._h, 1._h, 1._h, 5._h, 5._h, 5._h, 5._h,
+        3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h
+    };
+
+    std::vector<armnn::Half> input1 =
+    {
+        2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h,
+        4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h
+    };
+
+    std::vector<armnn::Half> output =
+    {
+        2._h, 2._h, 2._h, 2._h, 5._h, 5._h, 5._h, 5._h,
+        4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<armnn::Half, 4> MaximumBroadcast1ElementFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    using namespace half_float::literal;
+
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<armnn::Half> input0 = { 1._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h };
+
+    std::vector<armnn::Half> input1 = { 2._h };
+
+    std::vector<armnn::Half> output = { 2._h, 2._h, 3._h, 4._h, 5._h, 6._h, 7._h, 8._h };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<armnn::Half, 4> MaximumBroadcast1DVectorFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    using namespace half_float::literal;
+
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<armnn::Half> input0 =
+    {
+        1._h, 2._h, 3._h, 4._h, 5._h, 6._h,
+        7._h, 8._h, 9._h, 10._h, 11._h, 12._h
+    };
+
+    std::vector<armnn::Half> input1 = { 1._h, 2._h, 3._h };
+
+    std::vector<armnn::Half> output =
+    {
+        1._h, 2._h, 3._h, 4._h, 5._h, 6._h,
+        7._h, 8._h, 9._h, 10._h, 11._h, 12._h
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
 LayerTestResult<uint8_t, 4> MaximumUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
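Note added for context (not part of the patch above): the three new Float16 tests mirror the existing float and uint8 Maximum tests, only with half-precision inputs written via the half_float _h literals. The sketch below re-derives the expected output of MaximumFloat16Test by taking the element-wise maximum of the two inputs with plain std::max; it is a minimal standalone illustration, and the include name "half.hpp" for the half-precision library is an assumption (Arm NN normally pulls it in through its own Half.hpp wrapper).

// Standalone sketch: re-derives the expected output of MaximumFloat16Test.
// Assumes the half_float library header is available as "half.hpp".
#include <algorithm>
#include <cassert>
#include <vector>

#include "half.hpp" // provides half_float::half and the _h literal (path is an assumption)

int main()
{
    using namespace half_float::literal;
    using half_float::half;

    // Same NCHW-flattened tensors as MaximumFloat16Test in the diff above.
    const std::vector<half> input0 =
    {
        1._h, 1._h, 1._h, 1._h, 5._h, 5._h, 5._h, 5._h,
        3._h, 3._h, 3._h, 3._h, 4._h, 4._h, 4._h, 4._h
    };
    const std::vector<half> input1 =
    {
        2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h,
        4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h
    };
    const std::vector<half> expected =
    {
        2._h, 2._h, 2._h, 2._h, 5._h, 5._h, 5._h, 5._h,
        4._h, 4._h, 4._h, 4._h, 5._h, 5._h, 5._h, 5._h
    };

    // Maximum is purely element-wise, so the reference result is just
    // std::max applied pairwise across the flattened tensors.
    std::vector<half> result(input0.size());
    std::transform(input0.begin(), input0.end(), input1.begin(), result.begin(),
                   [](half a, half b) { return std::max(a, b); });

    assert(result == expected);
    return 0;
}

In the full commit these helpers would also need to be registered with the Reference backend's test suite (RefLayerTests.cpp typically wires such functions up through the ARMNN_AUTO_TEST_CASE macro), but that file falls outside this view, which is filtered to MaximumTestImpl.cpp.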