aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2020-06-29 16:27:03 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2020-07-01 08:26:47 +0000
commit526647333571169076f5e72c9fb18c71025bf7c0 (patch)
tree6dc559a7b0fae3705172b09a88fa552926652040 /src/backends/backendsCommon
parentcbd2c230b7ce5f26e2ccccf36b7ad450f6e1ad09 (diff)
downloadarmnn-526647333571169076f5e72c9fb18c71025bf7c0.tar.gz
IVGCVSW-4903 Connect axis parameter in Gather from android to ACL.
!android-nn-driver:3302 Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: Ifbc49acb5272f8a36719bb68676e44817190537d
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.cpp445
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.hpp9
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp2
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp5
-rw-r--r--src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp5
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp4
6 files changed, 245 insertions, 225 deletions
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index e509a7b929..52e615a2d9 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,177 +37,177 @@ bool DefaultLayerSupport(const char* func,
namespace armnn
{
-bool LayerSupportBase::IsAbsSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsAbsSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsActivationSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const ActivationDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
+ const TensorInfo&, //output
+ const ActivationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsAdditionSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsAdditionSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &/*input*/,
- const armnn::TensorInfo &/*output*/,
- const armnn::ArgMinMaxDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo&, // input
+ const armnn::TensorInfo&, // output
+ const armnn::ArgMinMaxDescriptor&, // descriptor
armnn::Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const TensorInfo& /*mean*/,
- const TensorInfo& /*var*/,
- const TensorInfo& /*beta*/,
- const TensorInfo& /*gamma*/,
- const BatchNormalizationDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo&, //input
+ const TensorInfo&, // output
+ const TensorInfo&, //mean
+ const TensorInfo&, //var
+ const TensorInfo&, //beta
+ const TensorInfo&, //gamma
+ const BatchNormalizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const BatchToSpaceNdDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const BatchToSpaceNdDescriptor&, //descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsComparisonSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
- const ComparisonDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsComparisonSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
+ const ComparisonDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> /*inputs*/,
- const TensorInfo& /*output*/,
- const OriginsDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*>, // inputs
+ const TensorInfo&, // output
+ const OriginsDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConstantSupported(const TensorInfo& /*output*/,
+bool LayerSupportBase::IsConstantSupported(const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const Convolution2dDescriptor& /*descriptor*/,
- const TensorInfo& /*weights*/,
- const Optional<TensorInfo>& /*biases*/,
+bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const Convolution2dDescriptor&, // descriptor
+ const TensorInfo&, // weights
+ const Optional<TensorInfo>&, // biases
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDebugSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsDebugSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const DepthToSpaceDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const DepthToSpaceDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const DepthwiseConvolution2dDescriptor& /*descriptor*/,
- const TensorInfo& /*weights*/,
- const Optional<TensorInfo>& /*biases*/,
+bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo&, //input
+ const TensorInfo&, //output
+ const DepthwiseConvolution2dDescriptor&, // descriptor
+ const TensorInfo&, // weights
+ const Optional<TensorInfo>&, // biases
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsDequantizeSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& /*boxEncodings*/,
- const TensorInfo& /*scores*/,
- const TensorInfo& /*anchors*/,
- const TensorInfo& /*detectionBoxes*/,
- const TensorInfo& /*detectionClasses*/,
- const TensorInfo& /*detectionScores*/,
- const TensorInfo& /*numDetections*/,
- const DetectionPostProcessDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo&, // boxEncodings
+ const TensorInfo&, // scores
+ const TensorInfo&, // anchors
+ const TensorInfo&, // detectionBoxes
+ const TensorInfo&, // detectionClasses
+ const TensorInfo&, // detectionScores
+ const TensorInfo&, // numDetections
+ const DetectionPostProcessDescriptor&, //descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const DepthwiseConvolution2dDescriptor& /*descriptor*/,
- const TensorInfo& /*weights*/,
- const Optional<TensorInfo>& /*biases*/,
+bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const DepthwiseConvolution2dDescriptor&, // descriptor
+ const TensorInfo&,// weights
+ const Optional<TensorInfo>&, // biases
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsDivisionSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
@@ -233,139 +233,148 @@ bool LayerSupportBase::IsElementwiseUnarySupported(const TensorInfo& input,
return false;
}
-bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/,
- const armnn::TensorInfo& /*input1*/,
- const armnn::TensorInfo& /*output*/,
+bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo&, // input0
+ const armnn::TensorInfo&, // input1
+ const armnn::TensorInfo&, // output
armnn::Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& /*input*/,
- const FakeQuantizationDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo&, // input
+ const FakeQuantizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFillSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const FillDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsFillSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const FillDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFloorSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsFloorSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const TensorInfo& /*weights*/,
- const TensorInfo& /*biases*/,
- const FullyConnectedDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const TensorInfo&, // weights
+ const TensorInfo&, // biases
+ const FullyConnectedDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& /*input0*/,
- const armnn::TensorInfo& /*input1*/,
- const armnn::TensorInfo& /*output*/,
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
+ const armnn::TensorInfo&, // input1
+ const armnn::TensorInfo&, // output
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGreaterSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo&, // input0
+ const armnn::TensorInfo&, // input1
+ const armnn::TensorInfo&, // output
+ const GatherDescriptor&, // descriptor
+ armnn::Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsGreaterSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsInputSupported(const TensorInfo& /*input*/,
+bool LayerSupportBase::IsInputSupported(const TensorInfo&, // input
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const InstanceNormalizationDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const InstanceNormalizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const L2NormalizationDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const L2NormalizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const LogSoftmaxDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const LogSoftmaxDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsLstmSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*outputStateIn*/,
- const TensorInfo& /*cellStateIn*/,
- const TensorInfo& /*scratchBuffer*/,
- const TensorInfo& /*outputStateOut*/,
- const TensorInfo& /*cellStateOut*/,
- const TensorInfo& /*output*/,
- const LstmDescriptor& /*descriptor*/,
- const LstmInputParamsInfo& /*paramsInfo*/,
+bool LayerSupportBase::IsLstmSupported(const TensorInfo&, // input
+ const TensorInfo&, // outputStateIn
+ const TensorInfo&, // cellStateIn
+ const TensorInfo&, // scratchBuffer
+ const TensorInfo&, // outputStateOut
+ const TensorInfo&, // cellStateOut
+ const TensorInfo&, // output
+ const LstmDescriptor&, // descriptor
+ const LstmInputParamsInfo&, // paramsInfo
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMaximumSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsMaximumSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMeanSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const MeanDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsMeanSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const MeanDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& /*input*/,
- const armnn::TensorInfo& /*output*/,
- armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
+bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo&, // input
+ const armnn::TensorInfo&, // output
+ armnn::Optional<std::string &> ) const // reasonIfUnsupported
{
return true;
}
-bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& /*input*/,
- const armnn::TensorInfo& /*output*/,
- armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
+bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo&, // input
+ const armnn::TensorInfo&, // output
+ armnn::Optional<std::string &> ) const // reasonIfUnsupported
{
return true;
}
-bool LayerSupportBase::IsMergeSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsMergeSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
@@ -379,194 +388,194 @@ bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> in
return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMinimumSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsMinimumSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const NormalizationDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsNormalizationSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const NormalizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsOutputSupported(const TensorInfo& /*output*/,
+bool LayerSupportBase::IsOutputSupported(const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPadSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const PadDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsPadSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const PadDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPermuteSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const PermuteDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsPermuteSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const PermuteDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const Pooling2dDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsPooling2dSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const Pooling2dDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& /*input*/,
- const PreCompiledDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo&, // input
+ const PreCompiledDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPreluSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*alpha*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsPreluSupported(const TensorInfo&, // input
+ const TensorInfo&, // alpha
+ const TensorInfo&, // output
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& /*input*/,
- const armnn::TensorInfo& /*output*/,
+bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo&, // input
+ const armnn::TensorInfo&, // output
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQLstmSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*previousOutputIn*/,
- const TensorInfo& /*previousCellStateIn*/,
- const TensorInfo& /*outputStateOut*/,
- const TensorInfo& /*cellStateOut*/,
- const TensorInfo& /*output*/,
- const QLstmDescriptor& /*descriptor*/,
- const LstmInputParamsInfo& /*paramsInfo*/,
+bool LayerSupportBase::IsQLstmSupported(const TensorInfo&, // input
+ const TensorInfo&, // previousOutputIn
+ const TensorInfo&, // previousCellStateIn
+ const TensorInfo&, // outputStateOut
+ const TensorInfo&, // cellStateOut
+ const TensorInfo&, // output
+ const QLstmDescriptor&, // descriptor
+ const LstmInputParamsInfo&, // paramsInfo
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*previousCellStateIn*/,
- const TensorInfo& /*previousOutputIn*/,
- const TensorInfo& /*cellStateOut*/,
- const TensorInfo& /*output*/,
- const QuantizedLstmInputParamsInfo& /*paramsInfo*/,
+bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo&, // input
+ const TensorInfo&, // previousCellStateIn
+ const TensorInfo&, // previousOutputIn
+ const TensorInfo&, // cellStateOut
+ const TensorInfo&, // output
+ const QuantizedLstmInputParamsInfo&, // paramsInfo
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsReshapeSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const ReshapeDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const ReshapeDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const ResizeDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsResizeSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const ResizeDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &/*input*/,
- const TensorInfo &/*output*/,
+bool LayerSupportBase::IsRsqrtSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSliceSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const SliceDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsSliceSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const SliceDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const SoftmaxDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const SoftmaxDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
/**/
-bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const SpaceToBatchNdDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const SpaceToBatchNdDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const SpaceToDepthDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const SpaceToDepthDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
- const ViewsDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
+ const ViewsDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
- const std::vector<std::reference_wrapper<TensorInfo>>& /*outputs*/,
- const ViewsDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo&, // input
+ const std::vector<std::reference_wrapper<TensorInfo>>&, // outputs
+ const ViewsDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& /*inputs*/,
- const TensorInfo& /*output*/,
- const StackDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>&, // inputs
+ const TensorInfo&, // output
+ const StackDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& /*inputs*/,
- const std::vector<const TensorInfo*>& /*outputs*/,
- const StandInDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>&, // inputs
+ const std::vector<const TensorInfo*>&, // outputs
+ const StandInDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
if (reasonIfUnsupported)
@@ -580,44 +589,44 @@ bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>&
return false;
}
-bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const StridedSliceDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const StridedSliceDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output*/,
+bool LayerSupportBase::IsSubtractionSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSwitchSupported(const TensorInfo& /*input0*/,
- const TensorInfo& /*input1*/,
- const TensorInfo& /*output0*/,
- const TensorInfo& /*output1*/,
+bool LayerSupportBase::IsSwitchSupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output0
+ const TensorInfo&, // output1
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const TransposeConvolution2dDescriptor& /*descriptor*/,
- const TensorInfo& /*weights*/,
- const Optional<TensorInfo>& /*biases*/,
+bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const TransposeConvolution2dDescriptor&, // descriptor
+ const TensorInfo&, // weights
+ const Optional<TensorInfo>&, // biases
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
+}
-bool LayerSupportBase::IsTransposeSupported(const TensorInfo& /*input*/,
- const TensorInfo& /*output*/,
- const TransposeDescriptor& /*descriptor*/,
+bool LayerSupportBase::IsTransposeSupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const TransposeDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index aff4529417..8d5535ab4e 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -159,11 +159,18 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const GatherDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index f2f7089040..6b2c00e298 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -471,7 +471,7 @@ struct RsqrtQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
-struct GatherQueueDescriptor : QueueDescriptor
+struct GatherQueueDescriptor : QueueDescriptorWithParameters<GatherDescriptor>
{
void Validate(const WorkloadInfo& workloadInfo) const;
};
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index d2565cf21d..788cb7e712 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -414,9 +414,12 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ auto cLayer = PolymorphicDowncast<const GatherLayer*>(&layer);
+ const GatherDescriptor& descriptor = cLayer->GetParameters();
result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
input1,
OverrideDataType(output, dataType),
+ descriptor,
reason);
break;
}
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index 1c97bef467..82f94512c3 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,9 +19,10 @@ armnn::INetworkPtr CreateGatherNetwork(const armnn::TensorInfo& paramsInfo,
{
armnn::INetworkPtr net(armnn::INetwork::Create());
+ armnn::GatherDescriptor descriptor;
armnn::IConnectableLayer* paramsLayer = net->AddInputLayer(0);
armnn::IConnectableLayer* indicesLayer = net->AddConstantLayer(armnn::ConstTensor(indicesInfo, indicesData));
- armnn::IConnectableLayer* gatherLayer = net->AddGatherLayer("gather");
+ armnn::IConnectableLayer* gatherLayer = net->AddGatherLayer(descriptor, "gather");
armnn::IConnectableLayer* outputLayer = net->AddOutputLayer(0, "output");
Connect(paramsLayer, gatherLayer, paramsInfo, 0, 0);
Connect(indicesLayer, gatherLayer, indicesInfo, 0, 1);
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index dcd073d279..e30cbb3d31 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -551,7 +551,7 @@ DECLARE_LAYER_POLICY_1_PARAM(Floor)
DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
-DECLARE_LAYER_POLICY_1_PARAM(Gather)
+DECLARE_LAYER_POLICY_2_PARAM(Gather)
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)