From 2b8c1da565871b3e69567c2cfc46c8dcbef301aa Mon Sep 17 00:00:00 2001
From: Matthew Jackson
Date: Thu, 4 Jul 2019 14:59:16 +0100
Subject: IVGCVSW-3418 Add Arm NN front end support for the new Stack layer

* Added new StackLayer class
* Made necessary changes to Descriptors, ILayerSupport, ILayerVisitor, etc.
* Added unit tests

Signed-off-by: Matthew Jackson
Change-Id: Ieb97a928a342ffe1901c6058eb895711c358fd3d
---
 src/backends/backendsCommon/LayerSupportBase.cpp |  8 ++
 src/backends/backendsCommon/LayerSupportBase.hpp |  5 ++
 src/backends/backendsCommon/WorkloadData.cpp     | 85 ++++++++++++++++++++++
 src/backends/backendsCommon/WorkloadData.hpp     |  6 ++
 src/backends/backendsCommon/WorkloadFactory.cpp  | 33 +++++++++
 src/backends/backendsCommon/WorkloadFactory.hpp  |  3 +
 .../test/IsLayerSupportedTestImpl.hpp            |  2 +
 7 files changed, 142 insertions(+)

(limited to 'src/backends/backendsCommon')

diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index ea22fac9ce..26b98a22b2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -415,6 +415,14 @@ bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                                        const TensorInfo& output,
+                                        const StackDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 36b8e77c38..dad07981fd 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -257,6 +257,11 @@ public:
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                          const TensorInfo& output,
+                          const StackDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 324c1debc0..878602391c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -581,6 +581,91 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
                       "ConcatQueueDescriptor");
 }
 
+//---------------------------------------------------------------
+void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
+
+    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
+    }
+
+    // All inputs must have the same shape, which is defined in parameters
+    const TensorShape& inputShape = m_Parameters.m_InputShape;
+    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
"must match the defined shape."); + } + } + + // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive), + // since the output tensor has an additional dimension. + if (m_Parameters.m_Axis > inputShape.GetNumDimensions()) + { + throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater " + "than the number of input dimensions."); + } + + // Output shape must be as inferred from the input shape + const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape(); + for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i) + { + if (outputShape[i] != inputShape[i]) + { + throw InvalidArgumentException("StackQueueDescriptor: Output tensor must " + "match shape inferred from input tensor."); + } + } + + if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs) + { + throw InvalidArgumentException("StackQueueDescriptor: Output tensor must " + "match shape inferred from input tensor."); + } + + for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i) + { + if (outputShape[i] != inputShape[i-1]) + { + throw InvalidArgumentException("StackQueueDescriptor: Output tensor must " + "match shape inferred from input tensor."); + } + } + + // Check the supported data types + std::vector supportedTypes = + { + DataType::Float32, + DataType::Float16, + DataType::Boolean, + DataType::Signed32, + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 + }; + + ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], + supportedTypes, + "StackQueueDescriptor"); + + for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i) + { + ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_InputTensorInfos[i], + "StackQueueDescriptor", + "InputTensor[0]", + "InputTensor[" + std::to_string(i) + "]"); + } + ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "StackQueueDescriptor", + "InputTensor[0]", + "OutputTensor[0]"); +} + //--------------------------------------------------------------- void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index d241f7b24f..f3d50699e6 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -110,6 +110,12 @@ struct ConcatQueueDescriptor : QueueDescriptorWithParameters // Deprecated. Use ConcatQueueDescriptor instead using MergerQueueDescriptor = ConcatQueueDescriptor; +// Stack layer workload data. +struct StackQueueDescriptor : QueueDescriptorWithParameters +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + // Activation layer workload data. struct ActivationQueueDescriptor : QueueDescriptorWithParameters { diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 1c23e1774b..a24a325b2d 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -729,6 +729,33 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::Stack: + { + auto cLayer = boost::polymorphic_downcast(&layer); + + // Get vector of all inputs. 
+            auto getTensorInfo = [&dataType](const InputSlot& slot)
+            {
+                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+            };
+            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
+            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+            std::vector<TensorInfo> inputs(beginI, endI);
+
+            auto getTensorInfoPtr = [](const TensorInfo& info)
+            {
+                return &info;
+            };
+            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
+
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+            break;
+        }
         case LayerType::StridedSlice:
         {
             auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
@@ -1130,6 +1157,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDep
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& Info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e09640f760..749a258a9d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -189,6 +189,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& Info) const;
+
     virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b02ab7b301..6aff7596b5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -408,6 +408,8 @@ DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
 
 DECLARE_LAYER_POLICY_2_PARAM(Splitter)
 
+DECLARE_LAYER_POLICY_2_PARAM(Stack)
+
 DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
 
 DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
-- 
cgit v1.2.1
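
Note (illustrative, not part of the commit): StackQueueDescriptor::Validate above enforces a single shape rule — stacking m_NumInputs tensors, each of shape m_InputShape, along m_Axis yields an output whose shape is the input shape with the number of inputs inserted at the axis position; the three loops check exactly that, dimension by dimension. The standalone C++ sketch below restates the rule; InferStackOutputShape is a hypothetical helper, and plain std::vector stands in for armnn::TensorShape so the example compiles on its own.

#include <stdexcept>
#include <vector>

// Hypothetical helper (not an Arm NN API): builds the output shape that the
// validation code above checks the workload's output tensor against.
std::vector<unsigned int> InferStackOutputShape(const std::vector<unsigned int>& inputShape,
                                                unsigned int axis,
                                                unsigned int numInputs)
{
    // Mirrors the m_Axis check: the axis may equal the input rank, because
    // the output gains one extra dimension.
    if (axis > inputShape.size())
    {
        throw std::invalid_argument("axis may not be greater than the input rank");
    }

    // Output = input shape with numInputs inserted at position axis:
    //   out[i]    == in[i]     for i < axis
    //   out[axis] == numInputs
    //   out[i]    == in[i - 1] for i > axis
    std::vector<unsigned int> outputShape(inputShape);
    outputShape.insert(outputShape.begin() + axis, numInputs);
    return outputShape;
}

// Example: stacking 4 tensors of shape [3, 5] along axis 1 gives [3, 4, 5].
// auto out = InferStackOutputShape({3, 5}, 1, 4);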