author    Teresa Charlin <teresa.charlinreyes@arm.com>  2021-09-02 13:58:52 +0100
committer Jim Flynn <jim.flynn@arm.com>  2021-09-28 21:59:08 +0000
commit    1222dbd3ddc4e819f8d1b204535bfd222cd1eadd
tree      f603e5140f1ea19c193a0b6a67c1967f0a24d126
parent    ef72938147b75cc37a757d4c9926503435c8e1d5
download  armnn-1222dbd3ddc4e819f8d1b204535bfd222cd1eadd.tar.gz
IVGCVSW-3706 Add Channel Shuffle Workload to GpuAcc backend
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ic37fc49a97a5ca570a23bc415d15ee3841534336
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                      19
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp                       5
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp                    6
-rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp                    3
-rw-r--r--  src/backends/cl/backend.mk                               1
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                    4
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt                 2
-rw-r--r--  src/backends/cl/workloads/ClChannelShuffleWorkload.cpp  98
-rw-r--r--  src/backends/cl/workloads/ClChannelShuffleWorkload.hpp  32
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp                1
10 files changed, 168 insertions(+), 3 deletions(-)
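
Before the per-file hunks, a quick usage sketch (not part of the patch): with this change a ChannelShuffle layer can be assigned to the GpuAcc backend through the normal Arm NN flow. The shapes, layer names, and group count below are illustrative assumptions; the calls are the standard Arm NN network-building API. Note the axis convention the new workload enforces: axis 1 means the tensor is treated as NCHW, axis 3 as NHWC.

    // Illustrative sketch, not from the patch: build a one-layer network and
    // optimize it for GpuAcc so the new ClChannelShuffleWorkload is selected.
    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        ChannelShuffleDescriptor descriptor;
        descriptor.m_NumGroups = 2; // the channel count (4 below) must divide evenly by this
        descriptor.m_Axis      = 1; // axis 1 -> NCHW, axis 3 -> NHWC in the CL workload

        IConnectableLayer* input   = network->AddInputLayer(0);
        IConnectableLayer* shuffle = network->AddChannelShuffleLayer(descriptor, "channelShuffle");
        IConnectableLayer* output  = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(shuffle->GetInputSlot(0));
        shuffle->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        TensorInfo info({ 1, 4, 2, 2 }, DataType::Float32); // N,C,H,W
        input->GetOutputSlot(0).SetTensorInfo(info);
        shuffle->GetOutputSlot(0).SetTensorInfo(info);

        IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
        IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::GpuAcc }, runtime->GetDeviceSpec());
        return optNet ? 0 : 1;
    }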
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index f9848ffd05..087302157f 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -26,6 +26,7 @@
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClCastWorkload.hpp"
+#include "workloads/ClChannelShuffleWorkload.hpp"
#include "workloads/ClComparisonWorkload.hpp"
#include "workloads/ClConstantWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
@@ -242,6 +243,18 @@ bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
nullptr);
}
+bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -252,12 +265,12 @@ bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
output);
}
-bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
+bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
const TensorInfo& output,
- const BatchToSpaceNdDescriptor& descriptor,
+ const ChannelShuffleDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
reasonIfUnsupported,
input,
output,
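
A note on the pattern above: FORWARD_WORKLOAD_VALIDATE_FUNC forwards its arguments to the backend validate function and translates the returned arm_compute::Status into the bool-plus-reason contract of ILayerSupport. The following is a behavioural sketch of that translation (an assumption about the macro's effect, not its literal definition):

    // Behavioural sketch only; the real macro lives in the Arm NN backends
    // and may differ in detail.
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <arm_compute/core/Error.h>
    #include "workloads/ClChannelShuffleWorkload.hpp"

    namespace armnn
    {
    bool IsChannelShuffleSupportedSketch(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ChannelShuffleDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
    {
        arm_compute::Status status = ClChannelShuffleValidate(input, output, descriptor);
        const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
        if (!supported && reasonIfUnsupported.has_value())
        {
            // Surface ACL's error text as the reason the caller sees.
            reasonIfUnsupported.value() = status.error_description();
        }
        return supported;
    }
    } // namespace armnn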
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index b7815416f4..43ae428163 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -56,6 +56,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsChannelShuffleSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4568177c02..530cb690d9 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -242,6 +242,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateCast(const CastQueueDescript
return MakeWorkload<ClCastWorkload>(descriptor, info, m_CLCompileContext);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<ClChannelShuffleWorkload>(descriptor, info, m_CLCompileContext);
+}
+
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 32b0d48167..7f01ee0918 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -77,6 +77,9 @@ public:
std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 16748cf72a..f00b2f345c 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -33,6 +33,7 @@ BACKEND_SOURCES := \
workloads/ClBatchNormalizationFloatWorkload.cpp \
workloads/ClBatchToSpaceNdWorkload.cpp \
workloads/ClCastWorkload.cpp \
+ workloads/ClChannelShuffleWorkload.cpp \
workloads/ClComparisonWorkload.cpp \
workloads/ClConcatWorkload.cpp \
workloads/ClConstantWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 2bb63b5347..f52126dd4c 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -1920,6 +1920,10 @@ ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloat16ToFloat32, ClContextControlFixture,
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToFloat16, ClContextControlFixture, CastFloat32ToFloat162dTest)
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToUInt8, ClContextControlFixture, CastFloat32ToUInt82dTest)
+// ChannelShuffle
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DFloat32, ChannelShuffle4DTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DQAsymmU8, ChannelShuffle4DTest<DataType::QAsymmU8>)
+
#if defined(ARMNNREF_ENABLED)
TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextControlFixture") {}
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index a351f73aa6..93ae678f98 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -18,6 +18,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClBatchToSpaceNdWorkload.hpp
ClCastWorkload.cpp
ClCastWorkload.hpp
+ ClChannelShuffleWorkload.cpp
+ ClChannelShuffleWorkload.hpp
ClComparisonWorkload.cpp
ClComparisonWorkload.hpp
ClConcatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
new file mode 100644
index 0000000000..751056a9a0
--- /dev/null
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClChannelShuffleWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status ClChannelShuffleValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor)
+{
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ // In Arm NN, as in NNAPI, the channel shuffle implementation is data-layout agnostic and takes the axis as a
+ // parameter. The ACL implementation used here depends on the data layout and has no axis parameter: it only
+ // supports channel shuffle on 4D tensors along the C dimension, i.e. axis 1 (NCHW) or axis 3 (NHWC).
+ arm_compute::DataLayout aclDataLayout;
+ if (input.GetNumDimensions() == 4)
+ {
+ switch (descriptor.m_Axis)
+ {
+ case 1:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+ break;
+ case 3:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+ break;
+ default:
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported axis"};
+ }
+ aclInputInfo.set_data_layout(aclDataLayout);
+ aclOutputInfo.set_data_layout(aclDataLayout);
+ return arm_compute::CLChannelShuffleLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_NumGroups);
+ }
+ else
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported number of dimensions"};
+ }
+}
+
+ClChannelShuffleWorkload::ClChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ const arm_compute::CLCompileContext& clCompileContext)
+ : BaseWorkload<ChannelShuffleQueueDescriptor>(descriptor, info)
+{
+ // Report Profiling Details
+ ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClChannelShufflenWorkload_Construct",
+ descriptor.m_Parameters,
+ info,
+ this->GetGuid());
+
+ m_Data.ValidateInputsOutputs("ClChannelShuffleWorkload", 1, 1);
+
+ arm_compute::ICLTensor& input = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ // In Arm NN, as in NNAPI, the channel shuffle implementation is data-layout agnostic and takes the axis as a
+ // parameter. The ACL implementation used here depends on the data layout and has no axis parameter: it only
+ // supports channel shuffle on 4D tensors along the C dimension, i.e. axis 1 (NCHW) or axis 3 (NHWC).
+ arm_compute::DataLayout aclDataLayout;
+ switch (descriptor.m_Parameters.m_Axis)
+ {
+ case 1:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+ break;
+ case 3:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+ break;
+ default:
+ ARMNN_ASSERT_MSG(false, "Unsupported axis");
+ break;
+ }
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ m_ChannelShuffleLayer.configure(clCompileContext, &input, &output, descriptor.m_Parameters.m_NumGroups);
+}
+
+void ClChannelShuffleWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClChannelShuffleWorkload_Execute", this->GetGuid());
+ RunClFunction(m_ChannelShuffleLayer, CHECK_LOCATION());
+}
+
+} // namespace armnn
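
For readers unfamiliar with the operation itself: a channel shuffle with g groups views the C channels as a [g, C/g] matrix, transposes it to [C/g, g], and flattens it back, so output channel o reads from input channel (o % g) * (C/g) + o / g. A standalone sketch of that mapping (illustrative, not part of the patch):

    // Prints the channel permutation performed by a channel shuffle.
    #include <cstdio>

    int main()
    {
        const unsigned int numChannels = 4;
        const unsigned int numGroups   = 2; // 'g' above
        const unsigned int groupSize   = numChannels / numGroups;

        for (unsigned int o = 0; o < numChannels; ++o)
        {
            const unsigned int i = (o % numGroups) * groupSize + (o / numGroups);
            std::printf("output channel %u <- input channel %u\n", o, i);
        }
        // For C=4, g=2 this prints the permutation 0<-0, 1<-2, 2<-1, 3<-3.
        return 0;
    }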
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.hpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.hpp
new file mode 100644
index 0000000000..5ef84e6a1e
--- /dev/null
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClChannelShuffleValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor);
+
+class ClChannelShuffleWorkload : public BaseWorkload<ChannelShuffleQueueDescriptor>
+{
+public:
+ ClChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ const arm_compute::CLCompileContext& clCompileContext);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::CLChannelShuffleLayer m_ChannelShuffleLayer;
+};
+
+} // namespace armnn
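
Why the member is mutable: IWorkload::Execute() is const throughout Arm NN, but running an ACL function object mutates its internal state (kernels may be prepared lazily on first run), so the layer member must be mutable to be runnable from a const method. A minimal sketch of the idiom in plain C++ (no Arm NN or ACL types):

    // Demonstrates calling a non-const run() from a const Execute().
    struct SketchFunction
    {
        void run() { ++m_RunCount; } // non-const: mutates internal state
        int m_RunCount = 0;
    };

    struct SketchWorkload
    {
        void Execute() const
        {
            m_Function.run(); // allowed only because m_Function is mutable
        }
        mutable SketchFunction m_Function;
    };

    int main()
    {
        const SketchWorkload workload;
        workload.Execute(); // compiles: mutable bypasses the const qualifier
        return 0;
    }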
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 88d1c1ba93..3e0984eddf 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -13,6 +13,7 @@
#include "ClBatchNormalizationFloatWorkload.hpp"
#include "ClBatchToSpaceNdWorkload.hpp"
#include "ClCastWorkload.hpp"
+#include "ClChannelShuffleWorkload.hpp"
#include "ClConvolution2dWorkload.hpp"
#include "ClDepthToSpaceWorkload.hpp"
#include "ClDepthwiseConvolutionWorkload.hpp"