ArmNN
 22.05.01
ConcatLayer.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
8 
9 namespace armnn
10 {
11 
12 /// This layer represents a merge operation.
13 class ConcatLayer : public LayerWithParameters<OriginsDescriptor>
14 {
15 public:
16  /// Makes a workload for the Concat type.
17  /// @param [in] graph The graph where this layer can be found.
18  /// @param [in] factory The workload factory which will create the workload.
19  /// @return A pointer to the created workload, or nullptr if not created.
20  virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
21 
22  /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported
23  /// otherwise creates tensor handlers.
24  /// @param [in] registry Contains all the registered tensor handle factories available for use.
25  /// @param [in] factory The workload factory which will create the workload.
26  /// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
27  /// @param [in] MemorySource Determine the source of memory e.g Malloc
28  virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
29  const IWorkloadFactory& factory,
30  const bool IsMemoryManaged = true) override;
31 
32  /// Creates a dynamically-allocated copy of this layer.
33  /// @param [in] graph The graph into which this layer is being cloned.
34  ConcatLayer* Clone(Graph& graph) const override;
35 
36  /// Check if the input tensor shape(s)
37  /// will lead to a valid configuration of @ref ConcatLayer.
38  /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
39  void ValidateTensorShapesFromInputs() override;
40 
41  /// By default returns inputShapes if the number of inputs are equal to number of outputs,
42  /// otherwise infers the output shapes from given input shapes and layer properties.
43  /// @param [in] inputShapes The input shapes layer has.
44  /// @return A vector to the inferred output shape.
45  std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
46 
48  void Accept(ILayerVisitor& visitor) const override;
50 
51 protected:
52  /// Constructor to create a ConcatLayer.
53  /// @param [in] param OriginsDescriptor to configure the concat operation.
54  /// @param [in] name Optional name for the layer.
55  ConcatLayer(const OriginsDescriptor& param, const char* name);
56 
57  /// Default destructor
58  ~ConcatLayer() = default;
59 
60 private:
61  template <typename FactoryType>
62  void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
63 
64 };
65 
66 } // namespace
ConcatLayer(const OriginsDescriptor &param, const char *name)
Constructor to create a ConcatLayer.
Definition: ConcatLayer.cpp:18
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Concat type.
Definition: ConcatLayer.cpp:23
Copyright (c) 2021 ARM Limited and Contributors.
ConcatLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
ARMNN_NO_DEPRECATE_WARN_BEGIN void Accept(ILayerVisitor &visitor) const override
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
An OriginsDescriptor for the ConcatLayer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConcatLayer.
This layer represents a merge operation.
Definition: ConcatLayer.hpp:13
~ConcatLayer()=default
Default destructor.
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true) override
Set the outputs to be appropriate sub tensors of the input if sub tensors are supported, otherwise creates tensor handlers.