diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-02-22 14:44:12 +0000 |
---|---|---|
committer | Teresa Charlin <teresa.charlinreyes@arm.com> | 2022-02-23 10:03:26 +0000 |
commit | 79cef69b1ec58f9ce010461eaaad04c896a4fe15 (patch) | |
tree | 0db88ad2d0f9e32dec916822d9a42d5d5ab81723 /src/backends/cl/ClLayerSupport.cpp | |
parent | ad9171701e6032b3ddf3573f85780bae30c512c6 (diff) | |
download | armnn-79cef69b1ec58f9ce010461eaaad04c896a4fe15.tar.gz |
Revert "IVGCVSW-6267 Add support of Unidirectional Sequence Lstm fp32/fp16 to Cl"
This reverts commit ad9171701e6032b3ddf3573f85780bae30c512c6.
Reason for revert: cannot update ACL pin until 22.02 release.
!ComputeLibrary:7150
Change-Id: Ic19a3c2fe5d6f7e5568174f18ea73684b269f72d
Diffstat (limited to 'src/backends/cl/ClLayerSupport.cpp')
-rw-r--r-- | src/backends/cl/ClLayerSupport.cpp | 188 |
1 file changed, 77 insertions, 111 deletions
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index e52f578bc0..e5204e4d5b 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -78,7 +78,6 @@ #include "workloads/ClSubtractionWorkload.hpp" #include "workloads/ClTransposeConvolution2dWorkload.hpp" #include "workloads/ClTransposeWorkload.hpp" -#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp" #endif @@ -213,13 +212,6 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, infos[1], *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)), reasonIfUnsupported); - case LayerType::Cast: - return IsCastSupported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::ChannelShuffle: - return IsChannelShuffleSupported(infos[0], - infos[1], - *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)), - reasonIfUnsupported); case LayerType::Comparison: return IsComparisonSupported(infos[0], infos[1], @@ -244,14 +236,6 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::ConvertFp32ToFp16: return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::ConvertBf16ToFp32: - return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0], - infos[1], - reasonIfUnsupported); - case LayerType::ConvertFp32ToBf16: - return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0], - infos[1], - reasonIfUnsupported); case LayerType::Convolution2d: { if (infos.size() != 4) @@ -280,34 +264,6 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, reasonIfUnsupported); } } - case LayerType::Convolution3d: - { - if (infos.size() != 4) - { - throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. 
" - "TensorInfos should be of format: {input, output, weights, biases}."); - } - - auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor)); - if (infos[3] == TensorInfo()) - { - return IsConvolution3dSupported(infos[0], - infos[1], - desc, - infos[2], - EmptyOptional(), - reasonIfUnsupported); - } - else - { - return IsConvolution3dSupported(infos[0], - infos[1], - desc, - infos[2], - infos[3], - reasonIfUnsupported); - } - } case LayerType::DepthToSpace: return IsDepthToSpaceSupported(infos[0], infos[1], @@ -405,17 +361,16 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)), lstmParamsInfo.value(), reasonIfUnsupported); - case LayerType::Map: - return true; - case LayerType::MemCopy: - return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::MemImport: - return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::Merge: - return LayerSupportBase::IsMergeSupported(infos[0], - infos[1], - infos[2], - reasonIfUnsupported); + case LayerType::QLstm: + return IsQLstmSupported(infos[0], + infos[1], + infos[2], + infos[3], + infos[4], + infos[5], + *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)), + lstmParamsInfo.value(), + reasonIfUnsupported); case LayerType::Maximum: return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); case LayerType::Mean: @@ -451,16 +406,6 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, reasonIfUnsupported); case LayerType::Prelu: return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported); - case LayerType::QLstm: - return IsQLstmSupported(infos[0], - infos[1], - infos[2], - infos[3], - infos[4], - infos[5], - *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)), - lstmParamsInfo.value(), - reasonIfUnsupported); case LayerType::Quantize: return IsQuantizeSupported(infos[0], infos[1], 
reasonIfUnsupported); case LayerType::QuantizedLstm: @@ -471,13 +416,6 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, infos[4], quantizedLstmParamsInfo.value(), reasonIfUnsupported); - case LayerType::Rank: - return true; - case LayerType::Reduce: - return IsReduceSupported(infos[0], - infos[1], - *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)), - reasonIfUnsupported); case LayerType::Reshape: return IsReshapeSupported(infos[0], infos[1], @@ -488,10 +426,11 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, infos[1], *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)), reasonIfUnsupported); - case LayerType::Shape: - return LayerSupportBase::IsShapeSupported(infos[0], - infos[1], - reasonIfUnsupported); + case LayerType::Reduce: + return IsReduceSupported(infos[0], + infos[1], + *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)), + reasonIfUnsupported); case LayerType::Slice: return IsSliceSupported(infos[0], infos[1], @@ -576,23 +515,72 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, reasonIfUnsupported); } } - case LayerType::UnidirectionalSequenceLstm: - return IsUnidirectionalSequenceLstmSupported(infos[0], - infos[1], - infos[2], - infos[3], - infos[4], - infos[5], - *(PolymorphicDowncast<const - UnidirectionalSequenceLstmDescriptor*>(&descriptor)), - lstmParamsInfo.value(), - reasonIfUnsupported); + case LayerType::Cast: + return IsCastSupported(infos[0], infos[1], reasonIfUnsupported); + case LayerType::ChannelShuffle: + return IsChannelShuffleSupported(infos[0], + infos[1], + *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)), + reasonIfUnsupported); + case LayerType::Convolution3d: + { + if (infos.size() != 4) + { + throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. 
" + "TensorInfos should be of format: {input, output, weights, biases}."); + } + + auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor)); + if (infos[3] == TensorInfo()) + { + return IsConvolution3dSupported(infos[0], + infos[1], + desc, + infos[2], + EmptyOptional(), + reasonIfUnsupported); + } + else + { + return IsConvolution3dSupported(infos[0], + infos[1], + desc, + infos[2], + infos[3], + reasonIfUnsupported); + } + } + case LayerType::MemCopy: + return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported); + case LayerType::MemImport: + return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported); + case LayerType::Map: + return true; case LayerType::Unmap: return true; + case LayerType::Merge: + return LayerSupportBase::IsMergeSupported(infos[0], + infos[1], + infos[2], + reasonIfUnsupported); + case LayerType::Rank: + return true; + case LayerType::Shape: + return LayerSupportBase::IsShapeSupported(infos[0], + infos[1], + reasonIfUnsupported); + case LayerType::ConvertBf16ToFp32: + return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0], + infos[1], + reasonIfUnsupported); + case LayerType::ConvertFp32ToBf16: + return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0], + infos[1], + reasonIfUnsupported); default: // layers not supported in cl by default: - // debug, detectionpostprocess, fakequantization, - // precompiled, standin, switch, pooling3d + // debug, detectionpostprocess, fakequantization, precompiled, + // standin, switch, unidirectionalsequencelstm, pooling3d return false; } } @@ -1427,26 +1415,4 @@ bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input, FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor); } -bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input, - const TensorInfo& outputStateIn, - const TensorInfo& cellStateIn, - const TensorInfo& output, - 
const Optional<TensorInfo>& hiddenStateOutput, - const Optional<TensorInfo>& cellStateOutput, - const UnidirectionalSequenceLstmDescriptor& descriptor, - const LstmInputParamsInfo& paramsInfo, - Optional<std::string&> reasonIfUnsupported) const -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate, - reasonIfUnsupported, - input, - outputStateIn, - cellStateIn, - output, - hiddenStateOutput, - cellStateOutput, - descriptor, - paramsInfo); -} - } // namespace armnn |