39 template<
typename FactoryType>
49 if (factory.SupportsSubTensors())
55 && ((concatAxis == numberOfDimensions - 1) || (concatAxis == numberOfDimensions - 2));
59 std::queue<ConcatLayer*> m_ConcatLayers;
61 m_ConcatLayers.push(
this);
62 while (!m_ConcatLayers.empty())
72 bool canUseSubTensorOnXorY =
true;
73 bool isTensorHandleFactory = std::is_same<armnn::ITensorHandleFactory, FactoryType>::value;
74 if (isTensorHandleFactory)
76 for (
unsigned int i = 0; i < numInputSlots; ++i)
80 std::vector<Capability> capabilities =
86 canUseSubTensorOnXorY =
false;
87 if (capabilities.empty())
89 canUseSubTensorOnXorY =
true;
96 && (PolymorphicDowncast<const Layer*>(currentLayer))->GetType() ==
LayerType::Concat)
98 canUseSubTensorOnXorY =
false;
101 if (!canUseSubTensorOnXorY)
109 std::vector<std::unique_ptr<ITensorHandle>> subTensors(0);
110 subTensors.reserve(numInputSlots);
111 for (
unsigned int i = 0; i < numInputSlots; ++i)
116 auto CreateSubTensor = [&]()
130 canUseSubTensorOnXorY)
133 return factory.CreateSubTensorHandle(*parentTensor,
138 return std::unique_ptr<ITensorHandle>();
141 auto subTensor = CreateSubTensor();
148 subTensors.push_back(std::move(subTensor));
153 if (subTensors.size() < numInputSlots)
160 for (
auto& subTensor : subTensors)
165 ARMNN_ASSERT_MSG(subTensor,
"ConcatLayer: Expected a valid sub-tensor for substitution.");
166 outputHandler.
SetData(std::move(subTensor));
172 m_ConcatLayers.push(PolymorphicDowncast<ConcatLayer*>(&inputLayer));
182 const bool isMemoryManaged)
189 CreateTensors(registry, workloadFactory, isMemoryManaged);
195 CreateTensors(registry, *handleFactory, isMemoryManaged);
209 for (
unsigned int i=0; i< inputShapes.size(); i++)
211 auto& inputShape = inputShapes[i];
213 ConditionalThrowIfNotEqual<LayerValidationException>(
214 "ConcatLayer: Num Dimensions must match all inputs.",
216 inputShape.GetNumDimensions());
220 std::vector<unsigned int> extentMin(numDims);
221 std::vector<unsigned int> extentMax(numDims);
222 for (
unsigned int i = 0; i < inputShapes.size(); i++)
226 for (
unsigned int d = 0; d < numDims; d++)
228 extentMin[d] = std::min(extentMin[d], origin[d]);
229 extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]);
234 if (!std::all_of(extentMin.begin(), extentMin.end(), [](
unsigned int s) {
return s == 0; }))
242 for (
unsigned int a = 0; a < inputShapes.size(); a++)
246 for (
unsigned int b = 0; b < a; b++)
251 bool allAxesOverlap =
true;
252 for (
unsigned int d = 0; d < numDims && allAxesOverlap; d++)
254 unsigned int a1 = aOrigin[d];
255 unsigned int a2 = aOrigin[d] + aShape[d];
257 unsigned int b1 = bOrigin[d];
258 unsigned int b2 = bOrigin[d] + bShape[d];
260 if (a2 <= b1 || b2 <= a1)
262 allAxesOverlap =
false;
275 unsigned int totalViewsVolume = 0;
276 for (
unsigned int i = 0; i < inputShapes.size(); i++)
278 totalViewsVolume += inputShapes[i].GetNumElements();
280 unsigned int outputVolume = 1;
281 for (
unsigned int d = 0; d < numDims; d++)
283 outputVolume *= (extentMax[d] - extentMin[d]);
286 ConditionalThrowIfNotEqual<LayerValidationException>(
287 "ConcatLayer: there are some gaps between views",
291 return std::vector<TensorShape>({
TensorShape({numDims, extentMax.data()}) });
297 ConditionalThrowIfNotEqual<LayerValidationException>(
298 "ConcatLayer: Num Inputs must match num views.",
308 std::vector<TensorShape> inputShapes;
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs is equal to the number of outputs; otherwise infers the output shapes from the given input shapes and layer properties.
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantized, that the quantization parameters are the same.
OriginsDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const OriginsDescriptor & GetParameters() const
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
const TensorShape & GetShape() const
void SetData(std::unique_ptr< ITensorHandle > data)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Layer & GetOwningLayer() const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2020 ARM Limited.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
virtual std::unique_ptr< IWorkload > CreateConcat(const ConcatQueueDescriptor &descriptor, const WorkloadInfo &info) const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
const uint32_t * GetViewOrigin(uint32_t idx) const
Return the view origin at the int value idx.
virtual std::vector< Capability > GetCapabilities(const IConnectableLayer *layer, const IConnectableLayer *connectedLayer, CapabilityClass capabilityClass)
std::vector< ViewOrigin > m_ViewOrigins
#define ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_ASSERT_MSG(COND, MSG)
An OriginsDescriptor for the ConcatLayer.
void Accept(ILayerVisitor &visitor) const override
Apply a visitor to this layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
This layer represents a merge operation.
#define ARMNN_ASSERT(COND)
ITensorHandle * GetData() const
Gets the allocated tensor memory.
std::vector< OutputHandler > m_OutputHandlers
void SetAdditionalInfo(QueueDescriptor &descriptor) const
virtual void VisitConcatLayer(const IConnectableLayer *layer, const OriginsDescriptor &concatDescriptor, const char *name=nullptr)
Function that a concat layer should call back to when its Accept(ILayerVisitor&) function is invoked.
const OutputHandler & GetOutputHandler(unsigned int i=0) const
ClWorkloadFactory FactoryType
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
uint32_t GetNumDimensions() const
Get the number of dimensions.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
LayerType GetType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
const OutputHandler & GetOutputHandler() const
const char * GetName() const override
Returns the name of the layer.
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
uint32_t GetNumViews() const
Get the number of views.
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
unsigned int GetConcatAxis() const
Get the concatenation axis value.
const TensorInfo & GetTensorInfo() const override
static const FactoryId LegacyFactoryId
ShapeInferenceMethod m_ShapeInferenceMethod
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported; otherwise creates tensor handles.
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.