From 1222dbd3ddc4e819f8d1b204535bfd222cd1eadd Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 2 Sep 2021 13:58:52 +0100
Subject: IVGCVSW-3706 Add Channel Shuffle Workload to GpuAcc backend

Signed-off-by: Teresa Charlin
Change-Id: Ic37fc49a97a5ca570a23bc415d15ee3841534336
---
 src/backends/cl/ClLayerSupport.cpp                 | 19 ++++-
 src/backends/cl/ClLayerSupport.hpp                 |  5 ++
 src/backends/cl/ClWorkloadFactory.cpp              |  6 ++
 src/backends/cl/ClWorkloadFactory.hpp              |  3 +
 src/backends/cl/backend.mk                         |  1 +
 src/backends/cl/test/ClLayerTests.cpp              |  4 +
 src/backends/cl/workloads/CMakeLists.txt           |  2 +
 .../cl/workloads/ClChannelShuffleWorkload.cpp      | 98 ++++++++++++++++++++++
 .../cl/workloads/ClChannelShuffleWorkload.hpp      | 32 +++++++
 src/backends/cl/workloads/ClWorkloads.hpp          |  1 +
 10 files changed, 168 insertions(+), 3 deletions(-)
 create mode 100644 src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
 create mode 100644 src/backends/cl/workloads/ClChannelShuffleWorkload.hpp

diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index f9848ffd05..087302157f 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -26,6 +26,7 @@
 #include "workloads/ClBatchNormalizationFloatWorkload.hpp"
 #include "workloads/ClBatchToSpaceNdWorkload.hpp"
 #include "workloads/ClCastWorkload.hpp"
+#include "workloads/ClChannelShuffleWorkload.hpp"
 #include "workloads/ClComparisonWorkload.hpp"
 #include "workloads/ClConstantWorkload.hpp"
 #include "workloads/ClConvertFp16ToFp32Workload.hpp"
@@ -242,6 +243,18 @@ bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                    nullptr);
 }
 
+bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const BatchToSpaceNdDescriptor& descriptor,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
@@ -252,12 +265,12 @@ bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                    output);
 }
 
-bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                                const TensorInfo& output,
-                                               const BatchToSpaceNdDescriptor& descriptor,
+                                               const ChannelShuffleDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
                                    reasonIfUnsupported,
                                    input,
                                    output,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index b7815416f4..43ae428163 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -56,6 +56,11 @@ public:
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsChannelShuffleSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const ChannelShuffleDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsComparisonSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4568177c02..530cb690d9 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -242,6 +242,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateCast(const CastQueueDescript
     return MakeWorkload<ClCastWorkload>(descriptor, info, m_CLCompileContext);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
+{
+    return MakeWorkload<ClChannelShuffleWorkload>(descriptor, info, m_CLCompileContext);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
 {
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 32b0d48167..7f01ee0918 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -77,6 +77,9 @@ public:
     std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
                                           const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 16748cf72a..f00b2f345c 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -33,6 +33,7 @@ BACKEND_SOURCES := \
         workloads/ClBatchNormalizationFloatWorkload.cpp \
         workloads/ClBatchToSpaceNdWorkload.cpp \
         workloads/ClCastWorkload.cpp \
+        workloads/ClChannelShuffleWorkload.cpp \
         workloads/ClComparisonWorkload.cpp \
         workloads/ClConcatWorkload.cpp \
         workloads/ClConstantWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 2bb63b5347..f52126dd4c 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -1920,6 +1920,10 @@ ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloat16ToFloat32, ClContextControlFixture,
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToFloat16, ClContextControlFixture, CastFloat32ToFloat162dTest)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToUInt8, ClContextControlFixture, CastFloat32ToUInt82dTest)
 
+// ChannelShuffle
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DFloat32, ChannelShuffle4DTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DQAsymmU8, ChannelShuffle4DTest<DataType::QAsymmU8>)
+
 #if defined(ARMNNREF_ENABLED)
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextControlFixture") {}
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index a351f73aa6..93ae678f98 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -18,6 +18,8 @@ list(APPEND armnnClBackendWorkloads_sources
     ClBatchToSpaceNdWorkload.hpp
     ClCastWorkload.cpp
     ClCastWorkload.hpp
+    ClChannelShuffleWorkload.cpp
+    ClChannelShuffleWorkload.hpp
     ClComparisonWorkload.cpp
     ClComparisonWorkload.hpp
     ClConcatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
new file mode 100644
index 0000000000..751056a9a0
--- /dev/null
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClChannelShuffleWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status ClChannelShuffleValidate(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const ChannelShuffleDescriptor& descriptor)
+{
+    arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
+    arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    // In Arm NN and in NNAPI, the channel shuffle implementation is data-layout agnostic and takes the axis
+    // as a parameter. The Arm Compute Library implementation depends on the data layout and has no axis
+    // parameter: it only supports channel shuffle for 4D tensors in dimension C (axis 1 or 3).
+    arm_compute::DataLayout aclDataLayout;
+    if (input.GetNumDimensions() == 4)
+    {
+        switch (descriptor.m_Axis)
+        {
+            case 1:
+                aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+                break;
+            case 3:
+                aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+                break;
+            default:
+                return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported axis"};
+        }
+        aclInputInfo.set_data_layout(aclDataLayout);
+        aclOutputInfo.set_data_layout(aclDataLayout);
+        return arm_compute::CLChannelShuffleLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_NumGroups);
+    }
+    else
+    {
+        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported number of dimensions"};
+    }
+}
+
+ClChannelShuffleWorkload::ClChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info,
+                                                   const arm_compute::CLCompileContext& clCompileContext)
+    : BaseWorkload<ChannelShuffleQueueDescriptor>(descriptor, info)
+{
+    // Report Profiling Details
+    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClChannelShuffleWorkload_Construct",
+                                         descriptor.m_Parameters,
+                                         info,
+                                         this->GetGuid());
+
+    m_Data.ValidateInputsOutputs("ClChannelShuffleWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    // In Arm NN and in NNAPI, the channel shuffle implementation is data-layout agnostic and takes the axis
+    // as a parameter. The Arm Compute Library implementation depends on the data layout and has no axis
+    // parameter: it only supports channel shuffle for 4D tensors in dimension C (axis 1 or 3).
+    arm_compute::DataLayout aclDataLayout;
+    switch (descriptor.m_Parameters.m_Axis)
+    {
+        case 1:
+            aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+            break;
+        case 3:
+            aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+            break;
+        default:
+            ARMNN_ASSERT_MSG(false, "Unsupported axis");
+            break;
+    }
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_ChannelShuffleLayer.configure(clCompileContext, &input, &output, descriptor.m_Parameters.m_NumGroups);
+}
+
+void ClChannelShuffleWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClChannelShuffleWorkload_Execute", this->GetGuid());
+    RunClFunction(m_ChannelShuffleLayer, CHECK_LOCATION());
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.hpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.hpp
new file mode 100644
index 0000000000..5ef84e6a1e
--- /dev/null
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClChannelShuffleValidate(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const ChannelShuffleDescriptor& descriptor);
+
+class ClChannelShuffleWorkload : public BaseWorkload<ChannelShuffleQueueDescriptor>
+{
+public:
+    ClChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor,
+                             const WorkloadInfo& info,
+                             const arm_compute::CLCompileContext& clCompileContext);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLChannelShuffleLayer m_ChannelShuffleLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 88d1c1ba93..3e0984eddf 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -13,6 +13,7 @@
 #include "ClBatchNormalizationFloatWorkload.hpp"
 #include "ClBatchToSpaceNdWorkload.hpp"
 #include "ClCastWorkload.hpp"
+#include "ClChannelShuffleWorkload.hpp"
 #include "ClConvolution2dWorkload.hpp"
 #include "ClDepthToSpaceWorkload.hpp"
 #include "ClDepthwiseConvolutionWorkload.hpp"
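
For reviewers who want the operator semantics rather than the plumbing: channel shuffle (as used in ShuffleNet) views the channel dimension C as (groups, C/groups), transposes the two factors, and flattens back. The standalone sketch below is not part of the patch and uses no Arm NN or ACL types; it is a plain reference of what CLChannelShuffleLayer is being asked to compute on an NCHW tensor.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Reference channel shuffle on a flat NCHW buffer: channel c = g * (C/G) + k
// moves to channel k * G + g, i.e. view C as (G, C/G), transpose, flatten.
std::vector<float> ChannelShuffleNCHW(const std::vector<float>& in,
                                      size_t n, size_t c, size_t h, size_t w,
                                      size_t groups)
{
    std::vector<float> out(in.size());
    const size_t cPerGroup = c / groups; // assumes c is divisible by groups
    const size_t plane     = h * w;      // elements per channel
    for (size_t b = 0; b < n; ++b)
    {
        for (size_t g = 0; g < groups; ++g)
        {
            for (size_t k = 0; k < cPerGroup; ++k)
            {
                const size_t srcC = g * cPerGroup + k; // source channel index
                const size_t dstC = k * groups + g;    // shuffled channel index
                const float* src = &in[(b * c + srcC) * plane];
                float*       dst = &out[(b * c + dstC) * plane];
                for (size_t i = 0; i < plane; ++i)
                {
                    dst[i] = src[i];
                }
            }
        }
    }
    return out;
}

int main()
{
    // 1x4x1x1 tensor with one value per channel: shuffling with 2 groups
    // reorders channels [0 1 2 3] -> [0 2 1 3].
    std::vector<float> in = {0.f, 1.f, 2.f, 3.f};
    for (float v : ChannelShuffleNCHW(in, 1, 4, 1, 1, 2))
    {
        std::printf("%.0f ", v);
    }
    std::printf("\n");
    return 0;
}
```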
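The non-obvious part of both ClChannelShuffleValidate and the workload constructor is the axis handling: CLChannelShuffleLayer always shuffles the C dimension and takes no axis argument, so the patch encodes the requested Arm NN axis by relabelling the tensors' data layout (axis 1 becomes NCHW, axis 3 becomes NHWC; anything else is rejected). A minimal sketch of that mapping, using a simplified stand-in enum rather than the real armnn::DataLayout / arm_compute::DataLayout types:

```cpp
#include <cstdio>

// Simplified stand-in for armnn::DataLayout / arm_compute::DataLayout.
enum class Layout { NCHW, NHWC };

// Mirrors the switch statements in the patch: returns false for any axis
// the CL backend cannot express as a data-layout relabelling.
bool MapAxisToLayout(unsigned int axis, Layout& layout)
{
    switch (axis)
    {
        case 1: layout = Layout::NCHW; return true; // channels-first: C is dim 1
        case 3: layout = Layout::NHWC; return true; // channels-last: C is dim 3
        default: return false; // surfaced via IsChannelShuffleSupported
    }
}

int main()
{
    for (unsigned int axis = 0; axis < 4; ++axis)
    {
        Layout layout;
        if (MapAxisToLayout(axis, layout))
        {
            std::printf("axis %u -> %s\n", axis,
                        layout == Layout::NCHW ? "NCHW" : "NHWC");
        }
        else
        {
            std::printf("axis %u -> unsupported\n", axis);
        }
    }
    return 0;
}
```

This is also why the validate function insists on 4D tensors: with fewer or more dimensions there is no NCHW/NHWC relabelling that puts the requested axis in the C position.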