From 5e90b831fc31605b5734531387837123fb31d0e0 Mon Sep 17 00:00:00 2001
From: Declan-ARM
Date: Wed, 7 Feb 2024 13:07:31 +0000
Subject: IVGCVSW-7569 GpuFsa Op: Add Reshape Operator

* Add Reshape EndToEnd tests to all backends

Signed-off-by: Declan-ARM
Change-Id: Ic6d07ba8de0cf3271ed0e4c6d604e070ccb968e3
---
 src/backends/cl/test/ClEndToEndTests.cpp         | 20 ++++++
 src/backends/gpuFsa/GpuFsaBackend.cpp            | 10 ++-
 src/backends/gpuFsa/GpuFsaLayerSupport.cpp       | 16 +++++
 src/backends/gpuFsa/layers/CMakeLists.txt        |  2 +
 src/backends/gpuFsa/layers/GpuFsaReshape.cpp     | 78 ++++++++++++++++++++++
 src/backends/gpuFsa/layers/GpuFsaReshape.hpp     | 21 ++++++
 src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp | 65 ++++++++++--------
 .../gpuFsa/test/GpuFsaLayerSupportTests.cpp      | 57 ++++++++++++++++
 src/backends/neon/test/NeonEndToEndTests.cpp     | 20 ++++++
 src/backends/reference/test/RefEndToEndTests.cpp | 21 ++++++
 10 files changed, 282 insertions(+), 28 deletions(-)
 create mode 100644 src/backends/gpuFsa/layers/GpuFsaReshape.cpp
 create mode 100644 src/backends/gpuFsa/layers/GpuFsaReshape.hpp

diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index c48aa8aac0..9e60843177 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -496,6 +496,26 @@ TEST_CASE("ClReshapeEndToEndTestFloat16")
     ReshapeEndToEndFloat16(clDefaultBackends);
 }
 
+TEST_CASE("ClReshapeEndToEndTestInt32")
+{
+    ReshapeEndToEnd<armnn::DataType::Signed32>(clDefaultBackends);
+}
+
+TEST_CASE("ClReshapeEndToEndTestInt16")
+{
+    ReshapeEndToEnd<armnn::DataType::QSymmS16>(clDefaultBackends);
+}
+
+TEST_CASE("ClReshapeEndToEndTestUInt8")
+{
+    ReshapeEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
+}
+
+TEST_CASE("ClReshapeEndToEndTestInt8")
+{
+    ReshapeEndToEnd<armnn::DataType::QAsymmS8>(clDefaultBackends);
+}
+
 // Resize Bilinear
 TEST_CASE("ClResizeBilinearEndToEndFloatNchwTest")
 {
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 29eb1adb6c..8c0aac625d 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -13,7 +13,6 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -27,6 +26,7 @@
 #include "layers/GpuFsaDepthwiseConvolution2d.hpp"
 #include "layers/GpuFsaElementwiseBinary.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
+#include "layers/GpuFsaReshape.hpp"
 #include "layers/GpuFsaResize.hpp"
 #include "layers/GpuFsaSoftmax.hpp"
 
@@ -338,6 +338,14 @@ OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgra
                 GpuFsaPooling2dCreateOp(preCompiledBlobPtr, input, *desc);
                 break;
             }
+            case LayerType::Reshape:
+            {
+                auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+                auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&base.GetParameters());
+                GpuFsaReshapeCreateOp(preCompiledBlobPtr, input, *desc);
+
+                break;
+            }
             case (LayerType::Resize):
             {
                 auto input = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index d75f18ccdb..2065998434 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -17,6 +17,7 @@
 #include "layers/GpuFsaDepthwiseConvolution2d.hpp"
 #include "layers/GpuFsaElementwiseBinary.hpp"
 #include "layers/GpuFsaPooling2d.hpp"
+#include "layers/GpuFsaReshape.hpp"
 #include "layers/GpuFsaResize.hpp"
 #include "layers/GpuFsaSoftmax.hpp"
 #endif
@@ -206,6 +207,21 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
                                         infos[0],
                                         *desc);
         }
+        case LayerType::Reshape:
+        {
+            if (infos.size() != 2)
+            {
+                throw InvalidArgumentException("Invalid number of Reshape TensorInfos. "
+                                               "TensorInfos should be of format: { input, output }.");
+            }
+
+            auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
+
+            FORWARD_LAYER_VALIDATE_FUNC(GpuFsaReshapeValidate,
+                                        reasonIfUnsupported,
+                                        infos[0],
+                                        *desc);
+        }
         case LayerType::Resize:
         {
             if (infos.size() != 2)
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 38d551140b..b3e8d6a2a0 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -18,6 +18,8 @@ list(APPEND armnnGpuFsaBackendLayers_sources
         GpuFsaElementwiseBinary.hpp
         GpuFsaPooling2d.cpp
         GpuFsaPooling2d.hpp
+        GpuFsaReshape.cpp
+        GpuFsaReshape.hpp
         GpuFsaResize.cpp
         GpuFsaResize.hpp
         GpuFsaSoftmax.cpp
diff --git a/src/backends/gpuFsa/layers/GpuFsaReshape.cpp b/src/backends/gpuFsa/layers/GpuFsaReshape.cpp
new file mode 100644
index 0000000000..47d4e42838
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaReshape.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaReshape.hpp"
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status GpuFsaReshapeValidate(const TensorInfo& input, const ReshapeDescriptor& descriptor)
+{
+    auto compileContext  = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext = GpuWorkloadContext(&compileContext);
+
+    GpuWorkloadSketch sketch(&workloadContext);
+
+    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
+    aclInputInfo.set_are_values_constant(input.IsConstant());
+
+    arm_compute::ITensorInfo* inputInfo = workloadContext.create_tensor_info(aclInputInfo);
+
+    ReshapeAttributes attributes;
+    attributes.shape(BuildArmComputeTensorShape(descriptor.m_TargetShape));
+
+    arm_compute::Status aclStatus = GpuReshape::validate_op(sketch, inputInfo, attributes);
+
+#ifndef NDEBUG
+    if (aclStatus.error_code() != arm_compute::ErrorCode::OK)
+    {
+        std::cout << "GpuFsaReshapeValidate failed: " << aclStatus.error_description() << std::endl;
+    }
+#endif
+
+    return aclStatus;
+}
+
+void GpuFsaReshapeCreateOp(GpuFsaPreCompiledBlob* blob, const TensorInfo& input, const ReshapeDescriptor& descriptor)
+{
+    GpuWorkloadSketch*  sketch          = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos;
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos;
+
+    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
+
+    aclInputInfo.set_are_values_constant(input.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
+
+    ReshapeAttributes attributes;
+    attributes.shape(BuildArmComputeTensorShape(descriptor.m_TargetShape));
+
+    arm_compute::ITensorInfo* addOutputInfo = GpuReshape::create_op(*sketch, inputTensorInfos[0], attributes);
+
+    // Temporary fix until fusing attempt is made for GpuFsa backend and outputLayer workload is created
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
+
+    // Store the tensorInfos within the blob as std::unique_ptr<> so they can be used later
+    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
+
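The two functions above are the whole of the new layer glue: GpuFsaReshapeValidate asks Arm Compute Library's dynamic fusion interface whether a GpuReshape operator with the requested target shape can be composed, and GpuFsaReshapeCreateOp records the operator into the pre-compiled blob that the backend later executes. For illustration only (not part of the patch), the validate helper could be driven directly as below; the wrapper function name is hypothetical, the include assumes the GpuFsa backend's internal include paths so that "layers/GpuFsaReshape.hpp" resolves, and an OpenCL-capable device must be present at run time. The 2x3 -> 6 shapes mirror the layer-support test added later in this patch.

// Illustrative sketch, not patch content.
#include "layers/GpuFsaReshape.hpp"   // internal GpuFsa backend header (assumed include path)

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

bool IsReshapeAcceptedByDynamicFusion()
{
    armnn::TensorInfo input({ 2, 3 }, armnn::DataType::Float32);

    armnn::ReshapeDescriptor descriptor;
    descriptor.m_TargetShape = armnn::TensorShape({ 6 });

    // Builds a throw-away GpuWorkloadSketch and calls GpuReshape::validate_op on it.
    arm_compute::Status status = armnn::GpuFsaReshapeValidate(input, descriptor);

    return status.error_code() == arm_compute::ErrorCode::OK;
}
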
diff --git a/src/backends/gpuFsa/layers/GpuFsaReshape.hpp b/src/backends/gpuFsa/layers/GpuFsaReshape.hpp
new file mode 100644
index 0000000000..16fa1f975e
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaReshape.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include 
+#include 
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaReshapeValidate(const TensorInfo& input, const ReshapeDescriptor& descriptor);
+
+void GpuFsaReshapeCreateOp(GpuFsaPreCompiledBlob* blob,
+                           const TensorInfo& input,
+                           const ReshapeDescriptor& descriptor);
+
+} // namespace armnn
+
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index 06b2a71dee..329929115a 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -12,6 +12,7 @@
 #include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
 #include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
+#include "backendsCommon/test/ReshapeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/ResizeEndToEndTestImpl.hpp"
 #include "backendsCommon/test/SoftmaxEndToEndTestImpl.hpp"
 
@@ -125,15 +126,8 @@ TEST_CASE("GpuFsaAvgPool2DEndtoEndTestFloat16")
 TEST_CASE("UNSUPPORTED_GpuFsaAvgPool2DIgnoreValueEndtoEndTestFloat32")
 {
     // Exclude padding must be set to true in Attributes! to be supported by GPU
-    try
-    {
-        AvgPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue);
-        FAIL("An exception should have been thrown");
-    }
-    catch (const armnn::InvalidArgumentException& e)
-    {
-        CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
-    }
+    CHECK_THROWS_AS(AvgPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue),
+                    armnn::InvalidArgumentException);
 }
 
 // Max Pool 2D
@@ -150,15 +144,40 @@ TEST_CASE("GpuFsaMaxPool2DEndtoEndTestFloat16")
 TEST_CASE("UNSUPPORTED_GpuFsaMaxPool2DIgnoreValueEndtoEndTestFloat32")
 {
     // Exclude padding must be set to true in Attributes! to be supported by GPU
-    try
-    {
-        MaxPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue);
-        FAIL("An exception should have been thrown");
-    }
-    catch (const armnn::InvalidArgumentException& e)
-    {
-        CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
-    }
+    CHECK_THROWS_AS(MaxPool2dEndToEnd<DataType::Float32>(gpuFsaDefaultBackends, PaddingMethod::IgnoreValue),
+                    armnn::InvalidArgumentException);
+}
+
+// Reshape
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestFloat32")
+{
+    CHECK_THROWS_AS(ReshapeEndToEnd<DataType::Float32>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestFloat16")
+{
+    CHECK_THROWS_AS(ReshapeEndToEndFloat16(gpuFsaDefaultBackends),
+                    armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestInt32")
+{
+    CHECK_THROWS_AS(ReshapeEndToEnd<DataType::Signed32>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestInt16")
+{
+    CHECK_THROWS_AS(ReshapeEndToEnd<DataType::QSymmS16>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestUInt8")
+{
+    CHECK_THROWS_AS(ReshapeEndToEnd<DataType::QAsymmU8>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("UNSUPPORTED_GpuFsaReshapeTestInt8")
+{
+    CHECK_THROWS_AS(ReshapeEndToEnd<DataType::QAsymmS8>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
 }
 
 // Resize Bilinear
@@ -187,15 +206,7 @@ TEST_CASE("GpuFsaResizeNearestNeighborEndToEndFloatHalfPixelNhwcTest")
 
 TEST_CASE("UNSUPPORTED_GpuFsaSoftmaxTestFloat32")
 {
-    try
-    {
-        SoftmaxEndToEnd<DataType::Float32>(gpuFsaDefaultBackends);
-        FAIL("An exception should have been thrown");
-    }
-    catch (const armnn::InvalidArgumentException& e)
-    {
-        CHECK(strcmp(e.what(), "Failed to assign a backend to each layer") == 0);
-    }
+    CHECK_THROWS_AS(SoftmaxEndToEnd<DataType::Float32>(gpuFsaDefaultBackends), armnn::InvalidArgumentException);
 }
 
 }
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index cf465c28ff..dce98b389f 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -176,6 +176,63 @@ TEST_CASE("IsLayerSupportedGpuFsaPooling2d")
     CHECK(supported);
 }
 
+TEST_CASE("UNSUPPORTED_IsLayerSupportedGpuFsaReshape")
+{
+    TensorInfo inputInfo{};
+    TensorInfo outputInfo{};
+
+    SUBCASE("Float32")
+    {
+        inputInfo  = { { 2, 3 }, DataType::Float32 };
+        outputInfo = { { 6 },    DataType::Float32 };
+    }
+
+    SUBCASE("Float16")
+    {
+        inputInfo  = { { 2, 3 }, DataType::Float16 };
+        outputInfo = { { 6 },    DataType::Float16 };
+    }
+
+    SUBCASE("Int32")
+    {
+        inputInfo  = { { 2, 3 }, DataType::Signed32 };
+        outputInfo = { { 6 },    DataType::Signed32 };
+    }
+
+    SUBCASE("Int16")
+    {
+        inputInfo  = { { 2, 3 }, DataType::QSymmS16 };
+        outputInfo = { { 6 },    DataType::QSymmS16 };
+    }
+
+    SUBCASE("UInt8")
+    {
+        inputInfo  = { { 2, 3 }, DataType::QAsymmU8 };
+        outputInfo = { { 6 },    DataType::QAsymmU8 };
+    }
+
+    SUBCASE("Int8")
+    {
+        inputInfo  = { { 2, 3 }, DataType::QAsymmS8 };
+        outputInfo = { { 6 },    DataType::QAsymmS8 };
+    }
+
+    ReshapeDescriptor desc;
+    desc.m_TargetShape = outputInfo.GetShape();
+
+    GpuFsaLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+
+    auto supported = supportChecker.IsLayerSupported(LayerType::Reshape,
+                                                     { inputInfo, outputInfo },
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+
+    CHECK(!supported);
+}
+
 TEST_CASE("IsLayerSupportedGpuFsaResize")
 {
     TensorInfo inputInfo({ 1, 5, 5, 1 }, DataType::Float32);
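The new GpuFsaLayerSupport::IsLayerSupported case added earlier in this patch is what the UNSUPPORTED_IsLayerSupportedGpuFsaReshape test exercises directly. Application code normally asks the same question through ArmNN's public BackendHelper interface rather than instantiating the backend's support class; a minimal sketch follows, assuming ArmNN was built with the GpuFsa backend registered. The helper names come from armnn/BackendHelper.hpp, the reporting function itself is illustrative, and the 2x3 -> 6 reshape matches the test above.

// Illustrative sketch, not patch content.
#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <iostream>
#include <string>

void ReportGpuFsaReshapeSupport()
{
    using namespace armnn;

    TensorInfo inputInfo({ 2, 3 }, DataType::Float32);
    TensorInfo outputInfo({ 6 }, DataType::Float32);

    ReshapeDescriptor descriptor;
    descriptor.m_TargetShape = outputInfo.GetShape();

    // Look up the GpuFsa backend's layer-support object via the public helper.
    auto handle = GetILayerSupportByBackendId(BackendId("GpuFsa"));

    std::string reason;
    bool supported = handle.IsReshapeSupported(inputInfo, outputInfo, descriptor, reason);

    if (!supported)
    {
        // While the operator is still being brought up, this is the expected path.
        std::cout << "GpuFsa Reshape not supported: " << reason << std::endl;
    }
}
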
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index f505b7fd46..6634ac6673 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -773,6 +773,26 @@ TEST_CASE("NeonReshapeEndToEndTestFloat16")
     ReshapeEndToEndFloat16(neonDefaultBackends);
 }
 
+TEST_CASE("NeonReshapeEndToEndTestInt32")
+{
+    ReshapeEndToEnd<armnn::DataType::Signed32>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonReshapeEndToEndTestInt16")
+{
+    ReshapeEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonReshapeEndToEndTestUInt8")
+{
+    ReshapeEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
+}
+
+TEST_CASE("NeonReshapeEndToEndTestInt8")
+{
+    ReshapeEndToEnd<armnn::DataType::QAsymmS8>(neonDefaultBackends);
+}
+
 // Resize Bilinear
 TEST_CASE("NeonResizeBilinearEndToEndFloatNchwTest")
 {
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 866cff8ca3..4af54852fc 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -1886,6 +1886,27 @@ TEST_CASE("RefReshapeEndToEndTestFloat16")
     ReshapeEndToEndFloat16(defaultBackends);
 }
 
+TEST_CASE("RefReshapeEndToEndTestInt32")
+{
+    ReshapeEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+}
+
+TEST_CASE("RefReshapeEndToEndTestInt16")
+{
+    ReshapeEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
+}
+
+TEST_CASE("RefReshapeEndToEndTestUInt8")
+{
+    ReshapeEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+}
+
+TEST_CASE("RefReshapeEndToEndTestInt8")
+{
+    ReshapeEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
+}
+
+// Force Import
 TEST_CASE("RefForceImportWithAlignedBuffersEndToEndTest")
 {
     ForceImportWithAlignedBuffersEndToEndTest(defaultBackends);
-- 
cgit v1.2.1
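The ReshapeEndToEnd<DataType> helpers referenced throughout these tests build a tiny Input -> Reshape -> Output graph, optimise it for the requested backends, run it and compare the results against a reference. A rough sketch of that flow using ArmNN's public API is shown below; the 2x3 -> 6 shapes, the "GpuFsa" backend id and the expected error text come from the patch, while the surrounding program structure is illustrative rather than the actual test implementation.

// Illustrative sketch, not patch content.
#include <armnn/ArmNN.hpp>

#include <iostream>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    TensorInfo inputInfo({ 2, 3 }, DataType::Float32);
    TensorInfo outputInfo({ 6 }, DataType::Float32);

    ReshapeDescriptor descriptor;
    descriptor.m_TargetShape = outputInfo.GetShape();

    // Input -> Reshape -> Output, the same topology the EndToEnd helper exercises.
    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* reshape = network->AddReshapeLayer(descriptor, "reshape");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(reshape->GetInputSlot(0));
    reshape->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    reshape->GetOutputSlot(0).SetTensorInfo(outputInfo);

    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());

    try
    {
        // On GpuFsa this is currently expected to throw InvalidArgumentException
        // ("Failed to assign a backend to each layer"), matching the UNSUPPORTED_
        // test cases; CpuRef, CpuAcc and GpuAcc optimise and run the graph normally.
        IOptimizedNetworkPtr optNet = Optimize(*network, { "GpuFsa" }, runtime->GetDeviceSpec());
    }
    catch (const InvalidArgumentException& e)
    {
        std::cout << "GpuFsa rejected the graph: " << e.what() << std::endl;
    }

    return 0;
}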