diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2021-12-24 12:24:40 +0000 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-01-19 12:58:56 +0000 |
commit | 34b429c2215bab7fd12b761dd5c200414c1b4a5b (patch) | |
tree | 7518ec40c8e56dbf229421d47d9527ed7aadd33e /src/backends/backendsCommon/LayerSupportBase.cpp | |
parent | 479e230479c10dcf9b9a79a3e80f0847d6ae5293 (diff) | |
download | armnn-34b429c2215bab7fd12b761dd5c200414c1b4a5b.tar.gz |
IVGCVSW-6629 Stabilize the ILayerSupport interface with unified strategy.
* New Virtual Function Added.
* Implemented in Ref Neon CL with switch statement for all layers.
* Deprecate original IsXXXLayerSupported functions.
* Ensure Npu not broken with change.
Change-Id: Icf61b16beec83d6af1cb287e24ab1e98a6138c8c
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Diffstat (limited to 'src/backends/backendsCommon/LayerSupportBase.cpp')
-rw-r--r-- | src/backends/backendsCommon/LayerSupportBase.cpp | 47 |
1 file changed, 46 insertions, 1 deletion
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 220590e197..89a0772602 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -4,13 +4,13 @@ // #include <armnn/Deprecated.hpp> -#include <armnn/Descriptors.hpp> #include <armnn/Exceptions.hpp> #include <armnn/Types.hpp> #include <backendsCommon/LayerSupportBase.hpp> #include <armnn/utility/IgnoreUnused.hpp> +#include <armnn/utility/PolymorphicDowncast.hpp> namespace { @@ -37,6 +37,51 @@ bool DefaultLayerSupport(const char* func, namespace armnn { +bool LayerSupportBase::IsLayerSupported(const LayerType& type, + const std::vector<TensorInfo>& infos, + const BaseDescriptor& descriptor, + const Optional<LstmInputParamsInfo>&, + const Optional<QuantizedLstmInputParamsInfo>&, + Optional<std::string&> reasonIfUnsupported) const +{ + switch(type) + { + case LayerType::MemCopy: + return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported); + case LayerType::MemImport: + return IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported); + case LayerType::StandIn: + { + auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor)); + + if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs)) + { + throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal " + "the combined number of input and output slots assigned " + "to the StandIn descriptor"); + } + + std::vector<const TensorInfo*> inputInfos; + for (uint32_t i = 0; i < desc.m_NumInputs; i++) + { + inputInfos.push_back(&infos[i]); + } + std::vector<const TensorInfo*> outputInfos; + for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++) + { + outputInfos.push_back(&infos[i]); + } + + return IsStandInSupported(inputInfos, + outputInfos, + desc, + reasonIfUnsupported); + } + default: + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); + } +} + bool 
LayerSupportBase::IsActivationSupported(const TensorInfo&, // input const TensorInfo&, //output const ActivationDescriptor&, // descriptor |