author     Teresa Charlin <teresa.charlinreyes@arm.com>   2021-09-01 16:30:34 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>        2021-09-29 09:54:21 +0000
commit     e89dd69474e237bed1066849ea1f6a4893e4a1a4 (patch)
tree       4569016aa0d12bae938e64b6ed05fe4093229009
parent     656f9d9ed51d2226074192071cf6374ff25c154e (diff)
download   armnn-e89dd69474e237bed1066849ea1f6a4893e4a1a4.tar.gz
IVGCVSW-3707 Add Channel Shuffle Workload to CpuAcc backend
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I2e3dee3c73fe58c7cfcb3ce3667884202f46e6aa
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                      | 17
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp                      |  5
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                   |  6
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp                   |  3
-rw-r--r--  src/backends/neon/backend.mk                                |  1
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                   |  3
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt                  |  2
-rw-r--r--  src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp  | 94
-rw-r--r--  src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp  | 30
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp               |  1
10 files changed, 160 insertions(+), 2 deletions(-)
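Not part of the patch: a minimal sketch of how a caller might exercise the new CpuAcc channel shuffle support check added below. It constructs NeonLayerSupport directly, which assumes access to Arm NN's internal backend headers; the include path, tensor shapes, and descriptor values are illustrative assumptions.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <neon/NeonLayerSupport.hpp> // internal backend header; exact include path is an assumption
#include <string>

bool CpuAccSupportsChannelShuffle()
{
    using namespace armnn;

    // NCHW tensor with 8 channels, shuffled in 2 groups along the channel axis (1).
    TensorInfo input({1, 8, 4, 4}, DataType::Float32);
    TensorInfo output({1, 8, 4, 4}, DataType::Float32);

    ChannelShuffleDescriptor descriptor;
    descriptor.m_NumGroups = 2;
    descriptor.m_Axis      = 1;

    std::string reason;
    NeonLayerSupport layerSupport;
    return layerSupport.IsChannelShuffleSupported(input, output, descriptor,
                                                  Optional<std::string&>(reason));
}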
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 154108e712..ec64f902da 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -28,13 +28,15 @@
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonBatchToSpaceNdWorkload.hpp"
#include "workloads/NeonCastWorkload.hpp"
-#include "workloads/NeonExpWorkload.hpp"
+#include "workloads/NeonChannelShuffleWorkload.hpp"
#include "workloads/NeonComparisonWorkload.hpp"
+#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonConstantWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDequantizeWorkload.hpp"
+#include "workloads/NeonExpWorkload.hpp"
#include "workloads/NeonInstanceNormalizationWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonLogWorkload.hpp"
@@ -45,7 +47,6 @@
#include "workloads/NeonLstmFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
-#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonDivisionWorkload.hpp"
@@ -233,6 +234,18 @@ bool NeonLayerSupport::IsCastSupported(const TensorInfo& input,
output);
}
+bool NeonLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonChannelShuffleValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index c2c81f480c..fc1e1f6125 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -57,6 +57,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsChannelShuffleSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 5ccec62650..9ec7583b18 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -178,6 +178,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateCast(const CastQueueDescri
return std::make_unique<NeonCastWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<NeonChannelShuffleWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index e791bbcfd5..41fc506aaa 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -74,6 +74,9 @@ public:
std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& Info) const override;
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 9906c80033..9869af0615 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -29,6 +29,7 @@ BACKEND_SOURCES := \
workloads/NeonBatchNormalizationWorkload.cpp \
workloads/NeonBatchToSpaceNdWorkload.cpp \
workloads/NeonCastWorkload.cpp \
+ workloads/NeonChannelShuffleWorkload.cpp \
workloads/NeonComparisonWorkload.cpp \
workloads/NeonConcatWorkload.cpp \
workloads/NeonConstantWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 75f9648f2d..65870a319d 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1419,6 +1419,9 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CastUIntToFloat, CastUInt8ToFloat2dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToIn8, CastFloat32ToInt82dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest)
+// ChannelShuffle
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DFloat32, ChannelShuffle4DTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ChannelShuffle4DQAsymmU8, ChannelShuffle4DTest<DataType::QAsymmU8>)
#if defined(ARMNNREF_ENABLED)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index d08dd7e704..6451f4c5d7 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -18,6 +18,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonBatchToSpaceNdWorkload.hpp
NeonCastWorkload.cpp
NeonCastWorkload.hpp
+ NeonChannelShuffleWorkload.cpp
+ NeonChannelShuffleWorkload.hpp
NeonComparisonWorkload.cpp
NeonComparisonWorkload.hpp
NeonConcatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
new file mode 100644
index 0000000000..b28ee447b3
--- /dev/null
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -0,0 +1,94 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonChannelShuffleWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonChannelShuffleValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor)
+{
+ arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ // In Arm NN and NNAPI, channel shuffle is data-layout agnostic and takes the axis as a parameter.
+ // The Neon implementation depends on the data layout and has no axis parameter; it only supports
+ // channel shuffle for 4D tensors along the channel dimension, i.e. axis 1 (NCHW) or 3 (NHWC).
+ arm_compute::DataLayout aclDataLayout;
+ if (input.GetNumDimensions() == 4)
+ {
+ switch (descriptor.m_Axis)
+ {
+ case 1:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+ break;
+ case 3:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+ break;
+ default:
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported axis"};
+ }
+ aclInputInfo.set_data_layout(aclDataLayout);
+ aclOutputInfo.set_data_layout(aclDataLayout);
+ return arm_compute::NEChannelShuffleLayer::validate(&aclInputInfo, &aclOutputInfo, descriptor.m_NumGroups);
+ }
+ else
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported number of dimensions"};
+ }
+}
+
+NeonChannelShuffleWorkload::NeonChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<ChannelShuffleQueueDescriptor>(descriptor, info)
+{
+ // Report Profiling Details
+ ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonChannelShuffleWorkload_Construct",
+ descriptor.m_Parameters,
+ info,
+ this->GetGuid());
+
+ m_Data.ValidateInputsOutputs("NeonChannelShuffleWorkload", 1, 1);
+
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ // In Arm NN and NNAPI, channel shuffle is data-layout agnostic and takes the axis as a parameter.
+ // The Neon implementation depends on the data layout and has no axis parameter; it only supports
+ // channel shuffle for 4D tensors along the channel dimension, i.e. axis 1 (NCHW) or 3 (NHWC).
+ arm_compute::DataLayout aclDataLayout;
+ switch (descriptor.m_Parameters.m_Axis)
+ {
+ case 1:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+ break;
+ case 3:
+ aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
+ break;
+ default:
+ ARMNN_ASSERT_MSG(false, "Unsupported axis");
+ break;
+ }
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ m_ChannelShuffleLayer.configure(&input, &output, descriptor.m_Parameters.m_NumGroups);
+}
+
+void NeonChannelShuffleWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonChannelShuffleWorkload_Execute", this->GetGuid());
+ m_ChannelShuffleLayer.run();
+}
+
+} // namespace armnn
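For reference, the reordering described by the data-layout comment above is the standard group-wise channel interleave: reshape the C channels into (numGroups, C/numGroups), transpose, and flatten. The standalone sketch below is not part of the patch; names are illustrative and ACL's NEChannelShuffleLayer does the real work. It reproduces the reordering for a flat NCHW buffer.

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<float> ChannelShuffleReference(const std::vector<float>& input,
                                           std::size_t batch, std::size_t channels,
                                           std::size_t height, std::size_t width,
                                           std::size_t numGroups)
{
    const std::size_t channelsPerGroup = channels / numGroups; // channels must divide evenly
    const std::size_t plane            = height * width;
    std::vector<float> output(input.size());

    for (std::size_t n = 0; n < batch; ++n)
    {
        for (std::size_t c = 0; c < channels; ++c)
        {
            // Reshape C -> (numGroups, channelsPerGroup), transpose, flatten:
            // input channel c lands on output channel
            // (c % channelsPerGroup) * numGroups + c / channelsPerGroup.
            const std::size_t dstC = (c % channelsPerGroup) * numGroups + c / channelsPerGroup;
            const float* src = &input[(n * channels + c) * plane];
            std::copy(src, src + plane, &output[(n * channels + dstC) * plane]);
        }
    }
    return output;
}
// With channels = 6 and numGroups = 2, the channel order [0,1,2,3,4,5] becomes [0,3,1,4,2,5].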
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp
new file mode 100644
index 0000000000..f0f20ae711
--- /dev/null
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEChannelShuffleLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonChannelShuffleValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor);
+
+class NeonChannelShuffleWorkload : public BaseWorkload<ChannelShuffleQueueDescriptor>
+{
+public:
+ NeonChannelShuffleWorkload(const ChannelShuffleQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::NEChannelShuffleLayer m_ChannelShuffleLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 2fb4b17eff..4d51d186e9 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -11,6 +11,7 @@
#include "NeonBatchNormalizationWorkload.hpp"
#include "NeonBatchToSpaceNdWorkload.hpp"
#include "NeonCastWorkload.hpp"
+#include "NeonChannelShuffleWorkload.hpp"
#include "NeonComparisonWorkload.hpp"
#include "NeonConcatWorkload.hpp"
#include "NeonConstantWorkload.hpp"