From 8ed4b8c34df929d29952f6b3f835bf61d57466e7 Mon Sep 17 00:00:00 2001
From: Nina Drozd
Date: Wed, 29 May 2019 10:41:04 +0100
Subject: IVGCVSW-3172 Add QSymm16 support for reshape workload

* Added QSymm16 to supported types in WorkloadData
* Added QSymm16 to supported types in RefLayerSupport
* Created templated SimpleReshapeTest in LayerTests
* Updated ClLayerTests to use templated SimpleReshapeTest
* Updated NeonLayerTests to use templated SimpleReshapeTest
* Added test for QSymm16 to RefCreateWorkloadTests, RefLayerTests
* Removed ReshapeTestImpl as all reshape test methods were moved
* Added FloorTestImpl for existing SimpleFloorTest

Change-Id: I78efede8aab74c2d4cb0841dd426b8e06186133d
Signed-off-by: Nina Drozd
---
 src/backends/backendsCommon/WorkloadData.cpp        |   3 +-
 src/backends/backendsCommon/test/CMakeLists.txt     |   2 +-
 src/backends/backendsCommon/test/FloorTestImpl.hpp  |  56 ++++++
 src/backends/backendsCommon/test/LayerTests.cpp     |   2 +-
 src/backends/backendsCommon/test/LayerTests.hpp     |  96 ++++++++++-
 .../backendsCommon/test/ReshapeTestImpl.hpp         | 188 ---------------------
 src/backends/cl/test/ClLayerTests.cpp               |   4 +-
 src/backends/neon/test/NeonLayerTests.cpp           |   4 +-
 src/backends/reference/RefLayerSupport.cpp          |   5 +-
 .../reference/test/RefCreateWorkloadTests.cpp       |   9 +-
 src/backends/reference/test/RefLayerTests.cpp       |   5 +-
 11 files changed, 168 insertions(+), 206 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/FloorTestImpl.hpp
 delete mode 100644 src/backends/backendsCommon/test/ReshapeTestImpl.hpp

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 9bb95f67af..96e83c21dd 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -878,7 +878,8 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
     };
 
     ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, "ReshapeQueueDescriptor");
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 508fc77ba6..040835e7f2 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -41,7 +41,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     Pooling2dTestImpl.hpp
     QuantizeHelper.hpp
     QuantizeTestImpl.hpp
-    ReshapeTestImpl.hpp
+    FloorTestImpl.hpp
     RuntimeTestImpl.hpp
     SoftmaxTestImpl.hpp
     SplitterEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/FloorTestImpl.hpp b/src/backends/backendsCommon/test/FloorTestImpl.hpp
new file mode 100644
index 0000000000..419b1b7bd8
--- /dev/null
+++ b/src/backends/backendsCommon/test/FloorTestImpl.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+#include "WorkloadTestUtils.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+LayerTestResult<float, 4> SimpleFloorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo,
+        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+    LayerTestResult<float, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+        { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
+          1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FloorQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 402e86de00..bc8e13d1ae 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -30,7 +30,7 @@
 #include "BatchNormTestImpl.hpp"
 #include "ActivationTestImpl.hpp"
 #include "Pooling2dTestImpl.hpp"
-#include "ReshapeTestImpl.hpp"
+#include "FloorTestImpl.hpp"
 #include "FullyConnectedTestImpl.hpp"
 #include "GatherTestImpl.hpp"
 #include "SpaceToBatchNdTestImpl.hpp"
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index f8f50366eb..edb70455d5 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -373,11 +373,8 @@ LayerTestResult SimpleSigmoidTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-LayerTestResult<float, 4> SimpleReshapeFloat32Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleReshapeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -1711,6 +1708,42 @@ std::vector<T> ConvertToDataType(const std::vector<float>& input,
     return output;
 }
+template<typename T>
+LayerTestResult<T, 4> SimpleReshapeTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::TensorInfo inputTensorInfo,
+    armnn::TensorInfo outputTensorInfo,
+    const std::vector<T>& inputData,
+    const std::vector<T>& outputExpectedData)
+{
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ReshapeQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 2> FullyConnectedTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1780,4 +1813,57 @@ LayerTestResult<T, 2> FullyConnectedTest(
     }
 
     return result;
+}
+
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleReshapeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 2, 3, 3 };
+    unsigned int outputShape[] = { 2, 2, 9, 1 };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    auto input = ConvertToDataType<ArmnnType>(
+        {
+            0.0f, 1.0f, 2.0f,
+            3.0f, 4.0f, 5.0f,
+            6.0f, 7.0f, 8.0f,
+
+            9.0f, 10.0f, 11.0f,
+            12.0f, 13.0f, 14.0f,
+            15.0f, 16.0f, 17.0f,
+
+            18.0f, 19.0f, 20.0f,
+            21.0f, 22.0f, 23.0f,
+            24.0f, 25.0f, 26.0f,
+
+            27.0f, 28.0f, 29.0f,
+            30.0f, 31.0f, 32.0f,
+            33.0f, 34.0f, 35.0f,
+        },
+        inputTensorInfo);
+
+    auto outputExpected = ConvertToDataType<ArmnnType>(
+        {
+            0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+
+            9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+
+            18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f,
+
+            27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+        },
+        outputTensorInfo);
+
+    return SimpleReshapeTestImpl<T>(
+        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/ReshapeTestImpl.hpp b/src/backends/backendsCommon/test/ReshapeTestImpl.hpp
deleted file mode 100644
index 49918c5cd1..0000000000
--- a/src/backends/backendsCommon/test/ReshapeTestImpl.hpp
+++ /dev/null
@@ -1,188 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include "QuantizeHelper.hpp"
-#include "WorkloadTestUtils.hpp"
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/TypesUtils.hpp>
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/IBackendInternal.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <test/TensorHelpers.hpp>
-
-template<typename T>
-LayerTestResult<T, 4> SimpleReshapeTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::TensorInfo inputTensorInfo,
-    armnn::TensorInfo outputTensorInfo,
-    const std::vector<T>& inputData,
-    const std::vector<T>& outputExpectedData)
-{
-    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<T, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ReshapeQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
-    return ret;
-}
-
-LayerTestResult<float, 4> SimpleReshapeFloat32Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    armnn::TensorInfo inputTensorInfo;
-    armnn::TensorInfo outputTensorInfo;
-
-    unsigned int inputShape[] = { 2, 2, 3, 3 };
-    unsigned int outputShape[] = { 2, 2, 9, 1 };
-
-    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
-    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
-
-    std::vector<float> input = std::vector<float>(
-    {
-        0.0f, 1.0f, 2.0f,
-        3.0f, 4.0f, 5.0f,
-        6.0f, 7.0f, 8.0f,
-
-        9.0f, 10.0f, 11.0f,
-        12.0f, 13.0f, 14.0f,
-        15.0f, 16.0f, 17.0f,
-
-        18.0f, 19.0f, 20.0f,
-        21.0f, 22.0f, 23.0f,
-        24.0f, 25.0f, 26.0f,
-
-        27.0f, 28.0f, 29.0f,
-        30.0f, 31.0f, 32.0f,
-        33.0f, 34.0f, 35.0f,
-    });
-
-    std::vector<float> outputExpected = std::vector<float>(
-    {
-        0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
-
-        9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
-
-        18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f,
-
-        27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
-    });
-
-    return SimpleReshapeTestImpl<float>(
-        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
-}
-
-LayerTestResult<float, 4> SimpleFloorTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo,
-        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
-          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
-
-    LayerTestResult<float, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
-        { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
-          1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::FloorQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
-    return ret;
-}
-
-LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    armnn::TensorInfo inputTensorInfo;
-    armnn::TensorInfo outputTensorInfo;
-
-    unsigned int inputShape[] = { 2, 2, 3, 3 };
-    unsigned int outputShape[] = { 2, 2, 9, 1 };
-
-    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(1.0f);
-    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(1.0f);
-
-    std::vector<uint8_t> input = std::vector<uint8_t>(
-    {
-        0, 1, 2,
-        3, 4, 5,
-        6, 7, 8,
-
-        9, 10, 11,
-        12, 13, 14,
-        15, 16, 17,
-
-        18, 19, 20,
-        21, 22, 23,
-        24, 25, 26,
-
-        27, 28, 29,
-        30, 31, 32,
-        33, 34, 35,
-    });
-
-    std::vector<uint8_t> outputExpected = std::vector<uint8_t>(
-    {
-        0, 1, 2, 3, 4, 5, 6, 7, 8,
-
-        9, 10, 11, 12, 13, 14, 15, 16, 17,
-
-        18, 19, 20, 21, 22, 23, 24, 25, 26,
-
-        27, 28, 29, 30, 31, 32, 33, 34, 35,
-    });
-
-    return SimpleReshapeTestImpl<uint8_t>(
-        workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected);
-}
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 17be230271..23c975e668 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -308,8 +308,8 @@ ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3Uint8, Concatenation4dDiffShape
 ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
 
 // Reshape
-ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
 
 // Pad
 ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 1ea227a14a..d89011d3ae 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -410,8 +410,8 @@ ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint
 ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorUint8, GreaterBroadcast1DVectorUint8Test)
 
 // Reshape
-ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
 
 // Pad
 ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 9a691a6fa7..a9cddfd1bb 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1021,11 +1021,12 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
 {
     ignore_unused(descriptor);
     // Define supported output types.
-    std::array<DataType,3> supportedOutputTypes =
+    std::array<DataType,4> supportedOutputTypes =
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
     };
     return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
                             "Reference reshape: input type not supported.");
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 2222a22cb3..fef2567d07 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -661,16 +661,21 @@ static void RefCreateReshapeWorkloadTest()
         TensorInfo({ 1, 4 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
+BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
 }
 
+BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
+{
+    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
+}
+
 template <typename ConcatWorkloadType, armnn::DataType DataType>
 static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
                                         unsigned int concatAxis)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 1207c1d648..a78b5aae5d 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -449,8 +449,9 @@ ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3Uint8, Concatenation4dDiffShape
 ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
 
 // Reshape
-ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<armnn::DataType::QuantisedSymm16>)
 
 // Rsqrt
 ARMNN_AUTO_TEST_CASE(Rsqrt2d, Rsqrt2dTest)
-- 
cgit v1.2.1