From 04059e5c4e23085f0a70456c2ff5fe1bc029eb06 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 12 Jan 2023 09:58:07 +0000
Subject: IVGCVSW-7381 Add IsLayerSupported implementation to GpuFsa backend

Signed-off-by: Matthew Sloyan
Change-Id: Ib18af4a4be4a87e301ff0d0fea5205e985052846
---
 src/backends/aclCommon/ArmComputeTensorUtils.hpp    | 12 ++-
 src/backends/gpuFsa/CMakeLists.txt                  |  1 +
 src/backends/gpuFsa/GpuFsaLayerSupport.cpp          | 90 ++++++++++++++++++++--
 src/backends/gpuFsa/GpuFsaLayerSupport.hpp          |  9 ++-
 src/backends/gpuFsa/backend.cmake                   |  1 +
 src/backends/gpuFsa/backend.mk                      |  3 +-
 src/backends/gpuFsa/layerValidators/CMakeLists.txt  | 14 ++++
 .../GpuFsaConvolution2dValidate.cpp                 | 88 +++++++++++++++++++++
 .../GpuFsaConvolution2dValidate.hpp                 | 19 +++++
 .../gpuFsa/test/GpuFsaLayerSupportTests.cpp         | 55 ++++++++++++-
 10 files changed, 277 insertions(+), 15 deletions(-)
 create mode 100644 src/backends/gpuFsa/layerValidators/CMakeLists.txt
 create mode 100644 src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.cpp
 create mode 100644 src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.hpp

diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index 6ddecf2aaa..14634d95b3 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -99,6 +99,16 @@ arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(const Descriptor &descri
                                       arm_compute::DimensionRoundingType::FLOOR);
 }
 
+/// Utility function used to setup an arm_compute::Padding2D object from an armnn layer descriptor.
+template <typename Descriptor>
+arm_compute::Padding2D BuildArmComputePaddingInfo(const Descriptor &descriptor)
+{
+    return arm_compute::Padding2D(descriptor.m_PadLeft,
+                                  descriptor.m_PadRight,
+                                  descriptor.m_PadTop,
+                                  descriptor.m_PadBottom);
+}
+
 /// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor.
 template <typename Tensor>
 void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo)
diff --git a/src/backends/gpuFsa/CMakeLists.txt b/src/backends/gpuFsa/CMakeLists.txt
index 635b25b2d5..8d1a58ee27 100644
--- a/src/backends/gpuFsa/CMakeLists.txt
+++ b/src/backends/gpuFsa/CMakeLists.txt
@@ -23,6 +23,7 @@ if(ARMCOMPUTEGPUFSA)
         GpuFsaWorkloadFactory.hpp
     )
 
+    add_subdirectory(layerValidators)
     add_subdirectory(workloads)
 
     if(BUILD_UNIT_TESTS)
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 6ae63a5668..7faad2ba73 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -7,13 +7,58 @@
 #include 
 #include 
-#include 
+#include 
+
+#if defined(ARMCOMPUTEGPUFSA_ENABLED)
+#include "layerValidators/GpuFsaConvolution2dValidate.hpp"
+#endif
 
 #include 
 
 namespace armnn
 {
 
+template<typename ... Args>
+bool IsGpuFsaBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
+{
+    IgnoreUnused(reasonIfUnsupported, (args)...);
+#if defined(ARMCOMPUTEGPUFSA_ENABLED)
+    return true;
+#else
+    if (reasonIfUnsupported)
+    {
+        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
+    }
+    return false;
+#endif
+}
+
+#if defined(ARMCOMPUTEGPUFSA_ENABLED)
+#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) (expr)
+#else
+#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) IsGpuFsaBackendSupported(reasonIfUnsupported)
+#endif
+
+#if defined(ARMCOMPUTEGPUFSA_ENABLED)
+template<typename FuncType, typename ... Args>
+inline bool CheckIsLayerSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
+{
+    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
+    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
+    if (!supported && reasonIfUnsupported)
+    {
+        reasonIfUnsupported.value() = aclStatus.error_description();
+    }
+    return supported;
+}
+
+#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
+    return CheckIsLayerSupported(func, reasonIfUnsupported, __VA_ARGS__);
+#else
+#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
+    return IsGpuFsaBackendSupported(reasonIfUnsupported, __VA_ARGS__);
+#endif
+
 bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
                                           const std::vector<TensorInfo>& infos,
                                           const BaseDescriptor& descriptor,
@@ -21,14 +66,45 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
                                           const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    IgnoreUnused(type);
-    IgnoreUnused(infos);
-    IgnoreUnused(descriptor);
     IgnoreUnused(lstmParamsInfo);
     IgnoreUnused(quantizedLstmInputParamsInfo);
-    IgnoreUnused(reasonIfUnsupported);
-
-    return false;
+
+    switch (type) {
+        case LayerType::Convolution2d:
+        {
+            if (infos.size() != 4)
+            {
+                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+            }
+
+            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+            if (infos[3] == TensorInfo())
+            {
+                FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
+                                            reasonIfUnsupported,
+                                            infos[0],
+                                            desc,
+                                            infos[2],
+                                            EmptyOptional());
+            }
+            else
+            {
+                FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
+                                            reasonIfUnsupported,
+                                            infos[0],
+                                            desc,
+                                            infos[2],
+                                            infos[3]);
+            }
+        }
+        case LayerType::Input:
+        case LayerType::Output:
+            return IsGpuFsaBackendSupported(reasonIfUnsupported, infos[0]);
+        default:
+            // Layers not supported in the GpuFsa backend.
+            return false;
+    }
 }
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.hpp b/src/backends/gpuFsa/GpuFsaLayerSupport.hpp
index dffc84cdcb..31177ec3c9 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.hpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
 
@@ -7,9 +7,11 @@
 #include 
 #include 
 
-namespace armnn {
+namespace armnn
+{
 
-class GpuFsaLayerSupport : public ILayerSupport {
+class GpuFsaLayerSupport : public ILayerSupport
+{
 public:
     bool IsLayerSupported(const LayerType& type,
                           const std::vector<TensorInfo>& infos,
@@ -17,7 +19,6 @@ public:
                           const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                           const Optional<QuantizedLstmInputParamsInfo>&,
                           Optional<std::string&> reasonIfUnsupported) const override;
-
 };
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/backend.cmake b/src/backends/gpuFsa/backend.cmake
index 2f4f5fbc7b..9167e84ffd 100644
--- a/src/backends/gpuFsa/backend.cmake
+++ b/src/backends/gpuFsa/backend.cmake
@@ -7,6 +7,7 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/src/backends/gpuFsa)
 list(APPEND armnnLibraries armnnGpuFsaBackend)
 
 if(ARMCOMPUTEGPUFSA)
+    list(APPEND armnnLibraries armnnGpuFsaBackendLayerValidators)
     list(APPEND armnnLibraries armnnGpuFsaBackendWorkloads)
     list(APPEND armnnUnitTestLibraries armnnGpuFsaBackendUnitTests)
 else()
diff --git a/src/backends/gpuFsa/backend.mk b/src/backends/gpuFsa/backend.mk
index 78ba7ba167..d8d254205b 100644
--- a/src/backends/gpuFsa/backend.mk
+++ b/src/backends/gpuFsa/backend.mk
@@ -21,7 +21,8 @@ BACKEND_SOURCES := \
         GpuFsaLayerSupport.cpp \
         GpuFsaRegistryInitializer.cpp \
         GpuFsaTensorHandleFactory.cpp \
-        GpuFsaWorkloadFactory.cpp
+        GpuFsaWorkloadFactory.cpp \
+        layerValidators/GpuFsaConvolution2dValidate.cpp
 
 else # ARMNN_COMPUTE_GPUFSA_ENABLED == 0
diff --git a/src/backends/gpuFsa/layerValidators/CMakeLists.txt b/src/backends/gpuFsa/layerValidators/CMakeLists.txt
new file mode 100644
index 0000000000..57ea41d56c
--- /dev/null
+++ b/src/backends/gpuFsa/layerValidators/CMakeLists.txt
@@ -0,0 +1,14 @@
+#
+# Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnGpuFsaBackendLayerValidators_sources
+        GpuFsaConvolution2dValidate.cpp
+        GpuFsaConvolution2dValidate.hpp
+    )
+
+add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
+target_include_directories(armnnGpuFsaBackendLayerValidators PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnGpuFsaBackendLayerValidators PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnGpuFsaBackendLayerValidators PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
diff --git a/src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.cpp b/src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.cpp
new file mode 100644
index 0000000000..2b6c2ee3dc
--- /dev/null
+++ b/src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.cpp
@@ -0,0 +1,88 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaConvolution2dValidate.hpp"
+
+#include 
+#include 
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo& input,
+                                                const Convolution2dDescriptor& descriptor,
+                                                const TensorInfo& weights,
+                                                const Optional<TensorInfo>& biases)
+{
+    using namespace arm_compute::experimental::dynamic_fusion;
+
+    // Create a new workload sketch, for validation purposes
+    auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto gpuCtx     = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &gpuCtx };
+
+    // Build and create tensor infos using the sketch
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
+    aclWeightsInfo.set_are_values_constant(weights.IsConstant());
+
+    auto inputInfo  = sketch.create_tensor_info(aclInputInfo);
+    auto weightInfo = sketch.create_tensor_info(aclWeightsInfo);
+
+    // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op
+    arm_compute::TensorInfo aclBiasInfo;
+    arm_compute::TensorInfo biasSketchInfo;
+    arm_compute::TensorInfo* biasSketchInfoPtr = nullptr;
+
+    if (descriptor.m_BiasEnabled)
+    {
+        ARMNN_ASSERT(biases.has_value());
+        aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+        aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
+
+        biasSketchInfo    = sketch.create_tensor_info(aclBiasInfo);
+        biasSketchInfoPtr = &biasSketchInfo;
+    }
+
+    // Set Conv2d attributes using descriptor
+    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX, descriptor.m_DilationY);
+    const arm_compute::Padding2D aclPadInfo = BuildArmComputePaddingInfo(descriptor);
+    const arm_compute::Size2D aclStrideInfo = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
+
+    Conv2dAttributes conv2DAttributes{};
+    conv2DAttributes.dilation(aclDilationInfo);
+    conv2DAttributes.pad(aclPadInfo);
+    conv2DAttributes.stride(aclStrideInfo);
+
+    {
+        // Validate operator, check status and update reasonIfUnsupported
+        return GpuConv2d::validate_op(sketch,
+                                      &inputInfo,
+                                      &weightInfo,
+                                      biasSketchInfoPtr,
+                                      conv2DAttributes);
+    }
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.hpp b/src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.hpp
new file mode 100644
index 0000000000..ecdb3cf597
--- /dev/null
+++ b/src/backends/gpuFsa/layerValidators/GpuFsaConvolution2dValidate.hpp
@@ -0,0 +1,19 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include 
+#include 
+
+#include 
+
+namespace armnn {
+
+arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo& input,
+                                                const Convolution2dDescriptor& descriptor,
+                                                const TensorInfo& weights,
+                                                const Optional<TensorInfo>& biases);
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index 09aab3f7f0..f162df0b55 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -1,13 +1,64 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include 
 #include 
 
+#include 
+
 #include 
 #include 
 
-using namespace armnn;
\ No newline at end of file
+using namespace armnn;
+
+TEST_SUITE("GpuFsaLayerSupport")
+{
+
+TEST_CASE("IsLayerSupportedGpuFsaConv2d")
+{
+    TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+    TensorInfo biasesInfo ({ 1 }, DataType::Float32, 0.0f, 0, true);
+
+    Convolution2dDescriptor desc;
+    desc.m_BiasEnabled = true;
+    desc.m_DataLayout = DataLayout::NHWC;
+
+    GpuFsaLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Convolution2d,
+                                                     {inputInfo, outputInfo, weightsInfo, biasesInfo},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(supported);
+}
+
+TEST_CASE("IsLayerSupportedGpuFsaConv2dUnsupported")
+{
+    TensorInfo inputInfo ({ 1, 5, 5, 1 }, DataType::Float32);
+    TensorInfo outputInfo({ 1, 3, 3, 1 }, DataType::Float32);
+    TensorInfo weightsInfo({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);
+
+    // NCHW is unsupported.
+    Convolution2dDescriptor desc;
+    desc.m_DataLayout = DataLayout::NCHW;
+
+    GpuFsaLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
+    auto supported = supportChecker.IsLayerSupported(LayerType::Convolution2d,
+                                                     {inputInfo, outputInfo, weightsInfo, TensorInfo()},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(!supported);
+    REQUIRE(reasonIfNotSupported.find("NCHW not supported by this kernel") != std::string::npos);
+}
+
+}
\ No newline at end of file
-- 
cgit v1.2.1
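
Illustrative usage, not part of the commit: a minimal sketch of how the new validator could be queried
directly, assuming an ArmN­N build with ARMCOMPUTEGPUFSA_ENABLED and compilation from within
src/backends/gpuFsa so the relative include resolves. The tensor shapes, stride values and the
standalone main() are placeholders chosen for illustration, not taken from the patch.

    #include "layerValidators/GpuFsaConvolution2dValidate.hpp"

    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <iostream>

    int main()
    {
        using namespace armnn;

        // NHWC Float32 tensors; shapes are illustrative only.
        TensorInfo input({ 1, 5, 5, 1 }, DataType::Float32);
        TensorInfo weights({ 1, 3, 3, 1 }, DataType::Float32, 0.0f, 0, true);

        Convolution2dDescriptor desc;
        desc.m_StrideX    = 1;
        desc.m_StrideY    = 1;
        desc.m_DataLayout = DataLayout::NHWC;

        // Bias-less validation, mirroring the infos[3] == TensorInfo() path in IsLayerSupported.
        arm_compute::Status status = GpuFsaConvolution2dValidate(input, desc, weights, EmptyOptional());
        if (status.error_code() != arm_compute::ErrorCode::OK)
        {
            std::cout << "Conv2d not supported: " << status.error_description() << std::endl;
        }
        return status.error_code() == arm_compute::ErrorCode::OK ? 0 : 1;
    }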