author     Simon Obute <simon.obute@arm.com>             2021-09-03 15:50:13 +0100
committer  Teresa Charlin <teresa.charlinreyes@arm.com>  2021-09-24 16:06:30 +0100
commit     51f67776a695c217a32596af806afeeb080f5528 (patch)
tree       33ccfd87ba365bcc6fc86d5a2181991a130b3061 /src/backends/backendsCommon
parent     f10b15a8946f39bdf3f60cebc59d2963069eedca (diff)
IVGCVSW-3705 Add Channel Shuffle Front end and Ref Implementation
* Add front end
* Add reference workload
* Add unit tests
* Add Serializer and Deserializer
* Update ArmNN Versioning

Signed-off-by: Simon Obute <simon.obute@arm.com>
Change-Id: I9ac1f953af3974382eac8e8d62d794d2344e8f47
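For context, a minimal usage sketch of the new layer from the public front end. The AddChannelShuffleLayer call belongs to the front-end changes referenced above, which sit outside the backendsCommon files shown below; shapes and values mirror the unit tests in this patch, so treat this as an illustrative sketch rather than part of the change itself.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    // Build a trivial one-layer graph that shuffles 9 channels in 3 groups.
    INetworkPtr network = INetwork::Create();

    ChannelShuffleDescriptor descriptor;
    descriptor.m_Axis      = 1;  // channel dimension of an NCHW tensor
    descriptor.m_NumGroups = 3;

    IConnectableLayer* input   = network->AddInputLayer(0);
    IConnectableLayer* shuffle = network->AddChannelShuffleLayer(descriptor, "channelShuffle");
    IConnectableLayer* output  = network->AddOutputLayer(0);

    TensorInfo tensorInfo({ 1, 9, 1, 1 }, DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    shuffle->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    input->GetOutputSlot(0).Connect(shuffle->GetInputSlot(0));
    shuffle->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}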
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp                          8
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp                          5
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                             13
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                              5
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                          21
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp                           3
-rw-r--r--  src/backends/backendsCommon/common.mk                                     1
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                           2
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp             2
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp                           1
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp  269
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp   31
12 files changed, 361 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 138d45367e..2753c927d5 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -95,6 +95,14 @@ bool LayerSupportBase::IsCastSupported(const TensorInfo&, //input
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsChannelShuffleSupported(const TensorInfo&, //input
+ const TensorInfo&, //output
+ const ChannelShuffleDescriptor&, //descriptor
+ Optional<std::string &> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsComparisonSupported(const TensorInfo&, // input0
const TensorInfo&, // input1
const TensorInfo&, // output
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 533a2c6bdd..cc68a220e2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -51,6 +51,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsChannelShuffleSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ChannelShuffleDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index d87f858601..a6def847fa 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2971,6 +2971,19 @@ void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
+void ChannelShuffleQueueDescriptor::Validate(const WorkloadInfo &workloadInfo) const
+{
+ const std::string descriptorName{"TransposeQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"QLstmQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 78da00be5d..b90c29c1b4 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -747,4 +747,9 @@ struct UnidirectionalSequenceLstmQueueDescriptor : QueueDescriptorWithParameters
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct ChannelShuffleQueueDescriptor : QueueDescriptorWithParameters<ChannelShuffleDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3f5972dab6..00263eca04 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -152,6 +152,21 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::ChannelShuffle:
+ {
+ auto cLayer = PolymorphicDowncast<const ChannelShuffleLayer*>(&layer);
+
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+ const ChannelShuffleDescriptor descriptor = cLayer->GetParameters();
+
+ result = layerSupportObject.IsChannelShuffleSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ descriptor,
+ reason);
+ break;
+ }
case LayerType::Comparison:
{
auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);
@@ -1501,6 +1516,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateCast(const CastQueueDescripto
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index efb8d99fa0..e84657ea13 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -90,6 +90,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateCast(const CastQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
+ virtual std::unique_ptr<IWorkload> CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 7ebc9975c3..7d3558c804 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -45,6 +45,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/ArgMinMaxTestImpl.cpp \
test/layerTests/BatchNormalizationTestImpl.cpp \
test/layerTests/CastTestImpl.cpp \
+ test/layerTests/ChannelShuffleTestImpl.cpp \
test/layerTests/ComparisonTestImpl.cpp \
test/layerTests/ConcatTestImpl.cpp \
test/layerTests/ConstantTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index c9bc5e74b8..292ec0efd2 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -64,6 +64,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/BatchToSpaceNdTestImpl.hpp
layerTests/CastTestImpl.cpp
layerTests/CastTestImpl.hpp
+ layerTests/ChannelShuffleTestImpl.cpp
+ layerTests/ChannelShuffleTestImpl.hpp
layerTests/ComparisonTestImpl.cpp
layerTests/ComparisonTestImpl.hpp
layerTests/ConcatTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 21b33d297b..c2d21842f2 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -620,6 +620,8 @@ DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
DECLARE_LAYER_POLICY_1_PARAM(Cast)
+DECLARE_LAYER_POLICY_2_PARAM(ChannelShuffle)
+
DECLARE_LAYER_POLICY_2_PARAM(Comparison)
DECLARE_LAYER_POLICY_2_PARAM(Concat)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 0690637500..9f1fa88b16 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -12,6 +12,7 @@
#include <backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp>
#include <backendsCommon/test/layerTests/CastTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp>
#include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
#include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
new file mode 100644
index 0000000000..46ee7d960e
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.cpp
@@ -0,0 +1,269 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ChannelShuffleTestImpl.hpp"
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+namespace
+{
+
+template<typename T, size_t NumDims>
+LayerTestResult<T, NumDims> ChannelShuffleTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::ChannelShuffleDescriptor descriptor,
+ armnn::TensorInfo inputTensorInfo,
+ armnn::TensorInfo outputTensorInfo,
+ const std::vector<T>& inputData,
+ const std::vector<T>& outputExpectedData)
+{
+ IgnoreUnused(memoryManager);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ChannelShuffleQueueDescriptor data;
+ data.m_Parameters = descriptor;
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateChannelShuffle(data, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, NumDims>(actualOutput,
+ outputExpectedData,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleChannelShuffleTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 1,9,1,1 };
+ unsigned int outputShape[] = { 1,9,1,1 };
+
+ armnn::ChannelShuffleDescriptor descriptor;
+ descriptor.m_Axis = 1;
+ descriptor.m_NumGroups = 3;
+
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(1.0f);
+
+ auto input = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f
+ },
+ inputTensorInfo);
+ auto outputExpected = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 3.0f, 6.0f, 1.0f, 4.0f, 7.0f, 2.0f, 5.0f, 8.0f
+ },
+ outputTensorInfo);
+
+ return ChannelShuffleTestImpl<T, 4>(
+ workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputTensorInfo,
+ outputTensorInfo,
+ input,
+ outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> ChannelShuffle2DTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 3, 12 };
+ unsigned int outputShape[] = { 3, 12 };
+
+ armnn::ChannelShuffleDescriptor descriptor;
+ descriptor.m_Axis = 1;
+ descriptor.m_NumGroups = 3;
+
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(1.0f);
+
+ auto input = ConvertToDataType<ArmnnType>(
+ {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35
+ },
+ inputTensorInfo);
+
+ auto outputExpected = ConvertToDataType<ArmnnType>(
+ {
+ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
+ 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23,
+ 24, 28, 32, 25, 29, 33, 26, 30, 34, 27, 31, 35
+ },
+ outputTensorInfo);
+
+ return ChannelShuffleTestImpl<T, 2>(
+ workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputTensorInfo,
+ outputTensorInfo,
+ input,
+ outputExpected);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ChannelShuffle4DTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ armnn::TensorInfo inputTensorInfo;
+ armnn::TensorInfo outputTensorInfo;
+
+ unsigned int inputShape[] = { 2, 9, 1, 2 };
+ unsigned int outputShape[] = { 2, 9, 1, 2 };
+
+ armnn::ChannelShuffleDescriptor descriptor;
+ descriptor.m_Axis = 1;
+ descriptor.m_NumGroups = 3;
+
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+ outputTensorInfo.SetQuantizationScale(1.0f);
+
+ auto input = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f,
+ 2.0f, 3.0f,
+ 4.0f, 5.0f,
+
+ 6.0f, 7.0f,
+ 8.0f, 9.0f,
+ 10.0f, 11.0f,
+
+ 12.0f, 13.0f,
+ 14.0f, 15.0f,
+ 16.0f, 17.0f,
+
+ 18.0f, 19.0f,
+ 20.0f, 21.0f,
+ 22.0f, 23.0f,
+
+ 24.0f, 25.0f,
+ 26.0f, 27.0f,
+ 28.0f, 29.0f,
+
+ 30.0f, 31.0f,
+ 32.0f, 33.0f,
+ 34.0f, 35.0f
+ },
+ inputTensorInfo);
+
+ auto outputExpected = ConvertToDataType<ArmnnType>(
+ {
+ 0.0f, 1.0f,
+ 6.0f, 7.0f,
+ 12.0f, 13.0f,
+ 2.0f, 3.0f,
+ 8.0f, 9.0f,
+ 14.0f, 15.0f,
+ 4.0f, 5.0f,
+ 10.0f, 11.0f,
+ 16.0f, 17.0f,
+
+ 18.0f, 19.0f,
+ 24.0f, 25.0f,
+ 30.0f, 31.0f,
+ 20.0f, 21.0f,
+ 26.0f, 27.0f,
+ 32.0f, 33.0f,
+ 22.0f, 23.0f,
+ 28.0f, 29.0f,
+ 34.0f, 35.0f
+ },
+ outputTensorInfo);
+
+ return ChannelShuffleTestImpl<T, 4>(
+ workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputTensorInfo,
+ outputTensorInfo,
+ input,
+ outputExpected);
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleChannelShuffleTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleChannelShuffleTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+ChannelShuffle2DTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+ChannelShuffle2DTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+ChannelShuffle4DTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ChannelShuffle4DTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
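For reference, the expected vectors in these tests follow from viewing the shuffled axis as [numGroups, channelsPerGroup] and transposing it to [channelsPerGroup, numGroups]. A standalone sketch that reproduces the first row of ChannelShuffle2DTest (0 4 8 1 5 9 2 6 10 3 7 11); illustrative only, not part of the patch:

#include <cstdio>

int main()
{
    const unsigned int channels         = 12;
    const unsigned int numGroups        = 3;
    const unsigned int channelsPerGroup = channels / numGroups;

    for (unsigned int c = 0; c < channels; ++c)
    {
        // Destination index c reads from this source index along the shuffled axis.
        const unsigned int src = (c % numGroups) * channelsPerGroup + c / numGroups;
        std::printf("%u ", src); // the source index equals the value because the row holds 0..11
    }
    std::printf("\n");
    return 0;
}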
diff --git a/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp
new file mode 100644
index 0000000000..3500e72ae7
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleChannelShuffleTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> ChannelShuffle2DTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ChannelShuffle4DTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file