author    Matthew Jackson <matthew.jackson@arm.com>  2019-07-04 14:59:16 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>  2019-07-10 12:06:51 +0000
commit    2b8c1da565871b3e69567c2cfc46c8dcbef301aa (patch)
tree      682327de212e273405cb257028568db997644c35 /src/backends
parent    ad5293a86e315049de36afd723dcd1a7e70681a7 (diff)
download  armnn-2b8c1da565871b3e69567c2cfc46c8dcbef301aa.tar.gz
IVGCVSW-3418 Add Arm NN front end support for the new Stack layer
* Added new StackLayer class
* Made necessary changes to Descriptors, ILayerSupport, ILayerVisitor, etc.
* Added unit tests

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Ieb97a928a342ffe1901c6058eb895711c358fd3d
Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp              |  8 +
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp              |  5 +
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                  | 85 +
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                  |  6 +
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp               | 33 +
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp               |  3 +
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp |  2 +
7 files changed, 142 insertions(+), 0 deletions(-)
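The change introduces a StackDescriptor whose fields (m_Axis, m_NumInputs, m_InputShape) drive the validation added below. A minimal sketch of how a caller might use the new layer, assuming the companion front-end change added an INetwork::AddStackLayer entry point (this backends-only diff does not show it):

```cpp
// Minimal sketch, assuming INetwork::AddStackLayer exists in the companion
// front-end change (not shown in this backends-only diff). Descriptor field
// names are taken from the Validate() code below.
#include <armnn/ArmNN.hpp>

armnn::INetworkPtr BuildStackNetwork()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::StackDescriptor descriptor;
    descriptor.m_Axis       = 0;                            // insert the new dimension at the front
    descriptor.m_NumInputs  = 3;                            // three identically shaped inputs
    descriptor.m_InputShape = armnn::TensorShape({ 3, 4 }); // shape every input must match

    // Stacking three [3, 4] tensors at axis 0 infers an output shape of [3, 3, 4].
    armnn::IConnectableLayer* stack = network->AddStackLayer(descriptor, "stack");
    (void)stack; // input/output slots would be connected here in a full network
    return network;
}
```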
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index ea22fac9ce..26b98a22b2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -415,6 +415,14 @@ bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 36b8e77c38..dad07981fd 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -257,6 +257,11 @@ public:
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
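The base-class implementation above simply reports the layer as unsupported via DefaultLayerSupport. A minimal sketch of how a concrete backend might override the new method, with purely illustrative checks (not taken from any real backend):

```cpp
// Illustrative override for a hypothetical backend; the signature mirrors the
// declaration added above, but the checks themselves are assumptions.
bool MyLayerSupport::IsStackSupported(const std::vector<const armnn::TensorInfo*> inputs,
                                      const armnn::TensorInfo& output,
                                      const armnn::StackDescriptor& descriptor,
                                      armnn::Optional<std::string&> reasonIfUnsupported) const
{
    if (inputs.size() != descriptor.m_NumInputs)
    {
        if (reasonIfUnsupported.has_value()) { reasonIfUnsupported.value() = "Input count mismatch"; }
        return false;
    }
    for (const armnn::TensorInfo* input : inputs)
    {
        if (input->GetDataType() != armnn::DataType::Float32)
        {
            if (reasonIfUnsupported.has_value()) { reasonIfUnsupported.value() = "Only Float32 is supported"; }
            return false;
        }
    }
    return output.GetDataType() == armnn::DataType::Float32;
}
```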
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 324c1debc0..878602391c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -582,6 +582,91 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
}
//---------------------------------------------------------------
+void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
+
+ if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
+ }
+
+ // All inputs must have the same shape, which is defined in parameters
+ const TensorShape& inputShape = m_Parameters.m_InputShape;
+ for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+ {
+ if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
+ "must match the defined shape.");
+ }
+ }
+
+ // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
+ // since the output tensor has an additional dimension.
+ if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater "
+ "than the number of input dimensions.");
+ }
+
+ // Output shape must be as inferred from the input shape
+ const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
+ for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
+ {
+ if (outputShape[i] != inputShape[i])
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+ "match shape inferred from input tensor.");
+ }
+ }
+
+ if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+ "match shape inferred from input tensor.");
+ }
+
+ for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
+ {
+ if (outputShape[i] != inputShape[i-1])
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+ "match shape inferred from input tensor.");
+ }
+ }
+
+ // Check the supported data types
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::Boolean,
+ DataType::Signed32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ supportedTypes,
+ "StackQueueDescriptor");
+
+ for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+ {
+ ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_InputTensorInfos[i],
+ "StackQueueDescriptor",
+ "InputTensor[0]",
+ "InputTensor[" + std::to_string(i) + "]");
+ }
+ ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_OutputTensorInfos[0],
+ "StackQueueDescriptor",
+ "InputTensor[0]",
+ "OutputTensor[0]");
+}
+
+//---------------------------------------------------------------
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
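Taken together, the three shape checks in StackQueueDescriptor::Validate enforce a single rule: the output shape is the input shape with one extra dimension of size m_NumInputs inserted at position m_Axis. A self-contained sketch of that inference rule:

```cpp
// Sketch of the shape rule Validate() enforces: e.g. stacking 3 inputs of
// shape [2, 4] at axis 1 yields an output of shape [2, 3, 4].
#include <vector>

std::vector<unsigned int> InferStackOutputShape(const std::vector<unsigned int>& inputShape,
                                                unsigned int axis,
                                                unsigned int numInputs)
{
    std::vector<unsigned int> outputShape;
    outputShape.reserve(inputShape.size() + 1);
    // Dimensions before the stack axis are copied through (first loop in Validate).
    outputShape.insert(outputShape.end(), inputShape.begin(), inputShape.begin() + axis);
    // The stack axis itself has one entry per stacked input (middle check in Validate).
    outputShape.push_back(numInputs);
    // Remaining dimensions shift right by one (last loop in Validate).
    outputShape.insert(outputShape.end(), inputShape.begin() + axis, inputShape.end());
    return outputShape;
}
```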
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index d241f7b24f..f3d50699e6 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -110,6 +110,12 @@ struct ConcatQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
// Deprecated. Use ConcatQueueDescriptor instead
using MergerQueueDescriptor = ConcatQueueDescriptor;
+// Stack layer workload data.
+struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Activation layer workload data.
struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1c23e1774b..a24a325b2d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -729,6 +729,33 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Stack:
+ {
+ auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
+
+ // Get vector of all inputs.
+ auto getTensorInfo = [&dataType](const InputSlot& slot)
+ {
+ return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+ };
+ auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
+ auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+ std::vector<TensorInfo> inputs(beginI, endI);
+
+ auto getTensorInfoPtr = [](const TensorInfo& info)
+ {
+ return &info;
+ };
+ auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
+
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+ break;
+ }
case LayerType::StridedSlice:
{
auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
@@ -1130,6 +1157,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDep
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e09640f760..749a258a9d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -189,6 +189,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
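The base CreateStack added above returns an empty unique_ptr, which is how a backend signals that it provides no Stack workload. A backend that does support the layer would override it along these lines (MyStackWorkload is a hypothetical workload class, not part of this diff):

```cpp
// Hypothetical backend override; MyStackWorkload is an assumed class name.
#include <memory>

std::unique_ptr<armnn::IWorkload> MyWorkloadFactory::CreateStack(
    const armnn::StackQueueDescriptor& descriptor,
    const armnn::WorkloadInfo& info) const
{
    return std::make_unique<MyStackWorkload>(descriptor, info);
}
```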
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b02ab7b301..6aff7596b5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -408,6 +408,8 @@ DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
+DECLARE_LAYER_POLICY_2_PARAM(Stack)
+
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)