ArmNN 22.05
LayersFwd.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "InternalTypes.hpp"
8 
10 #include "layers/AdditionLayer.hpp"
14 #include "layers/CastLayer.hpp"
17 #include "layers/ConcatLayer.hpp"
18 #include "layers/ConstantLayer.hpp"
25 #include "layers/DebugLayer.hpp"
30 #include "layers/DivisionLayer.hpp"
33 #include "layers/FillLayer.hpp"
34 #include "layers/FloorLayer.hpp"
36 #include "layers/GatherLayer.hpp"
37 #include "layers/GatherNdLayer.hpp"
38 #include "layers/InputLayer.hpp"
43 #include "layers/LstmLayer.hpp"
44 #include "layers/MapLayer.hpp"
45 #include "layers/MaximumLayer.hpp"
46 #include "layers/MeanLayer.hpp"
47 #include "layers/MemCopyLayer.hpp"
49 #include "layers/MergeLayer.hpp"
50 #include "layers/MinimumLayer.hpp"
53 #include "layers/OutputLayer.hpp"
54 #include "layers/PadLayer.hpp"
55 #include "layers/PermuteLayer.hpp"
59 #include "layers/PreluLayer.hpp"
60 #include "layers/QuantizeLayer.hpp"
61 #include "layers/QLstmLayer.hpp"
63 #include "layers/RankLayer.hpp"
64 #include "layers/ReduceLayer.hpp"
65 #include "layers/ReshapeLayer.hpp"
66 #include "layers/ResizeLayer.hpp"
67 #include "layers/ShapeLayer.hpp"
68 #include "layers/SliceLayer.hpp"
69 #include "layers/SoftmaxLayer.hpp"
72 #include "layers/SplitterLayer.hpp"
73 #include "layers/StackLayer.hpp"
74 #include "layers/StandInLayer.hpp"
77 #include "layers/SwitchLayer.hpp"
81 #include "layers/UnmapLayer.hpp"
82 
83 namespace armnn
84 {
85 
86 template <LayerType Type>
87 struct LayerTypeOfImpl;
88 
89 template <LayerType Type>
90 using LayerTypeOf = typename LayerTypeOfImpl<Type>::Type;
91 
92 template <typename T>
93 constexpr LayerType LayerEnumOf(const T* = nullptr);
94 
95 #define DECLARE_LAYER_IMPL(_, LayerName) \
96     class LayerName##Layer; \
97     template <> \
98     struct LayerTypeOfImpl<LayerType::_##LayerName> \
99     { \
100         using Type = LayerName##Layer; \
101     }; \
102     template <> \
103     constexpr LayerType LayerEnumOf(const LayerName##Layer*) \
104     { \
105         return LayerType::_##LayerName; \
106     }
107 
108 #define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)
109 
111 DECLARE_LAYER(Addition)
113 DECLARE_LAYER(BatchNormalization)
116 DECLARE_LAYER(ChannelShuffle)
117 DECLARE_LAYER(Comparison)
119 DECLARE_LAYER(Constant)
120 DECLARE_LAYER(ConvertBf16ToFp32)
121 DECLARE_LAYER(ConvertFp16ToFp32)
122 DECLARE_LAYER(ConvertFp32ToBf16)
123 DECLARE_LAYER(ConvertFp32ToFp16)
124 DECLARE_LAYER(Convolution2d)
125 DECLARE_LAYER(Convolution3d)
128 DECLARE_LAYER(DepthwiseConvolution2d)
131 DECLARE_LAYER(Division)
132 DECLARE_LAYER(ElementwiseUnary)
138 DECLARE_LAYER(GatherNd)
140 DECLARE_LAYER(InstanceNormalization)
141 DECLARE_LAYER(L2Normalization)
142 DECLARE_LAYER(LogicalBinary)
149 DECLARE_LAYER(MemImport)
152 DECLARE_LAYER(Multiplication)
153 DECLARE_LAYER(Normalization)
159 DECLARE_LAYER(PreCompiled)
163 DECLARE_LAYER(QuantizedLstm)
177 DECLARE_LAYER(Subtraction)
180 DECLARE_LAYER(TransposeConvolution2d)
181 DECLARE_LAYER(UnidirectionalSequenceLstm)
183 } // namespace armnn
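The DECLARE_LAYER macro above ties each LayerType enum value to its layer class through two compile-time traits: a LayerTypeOfImpl specialisation (enum value to class) and a LayerEnumOf specialisation (class back to enum value). As an illustration only, not part of the header, the invocation DECLARE_LAYER(Addition) at line 111 expands along these lines, followed by a minimal usage sketch (assuming this header and <type_traits> are included):

// Illustrative expansion of DECLARE_LAYER(Addition), i.e. DECLARE_LAYER_IMPL
// with an empty prefix, following the macro body in the listing above.
class AdditionLayer;

template <>
struct LayerTypeOfImpl<LayerType::Addition>
{
    using Type = AdditionLayer;
};

template <>
constexpr LayerType LayerEnumOf(const AdditionLayer*)
{
    return LayerType::Addition;
}

// Usage sketch: both mappings are available at compile time.
static_assert(std::is_same<LayerTypeOf<LayerType::Addition>, AdditionLayer>::value,
              "LayerTypeOf maps the enum value to the layer class");
static_assert(LayerEnumOf<AdditionLayer>() == LayerType::Addition,
              "LayerEnumOf maps the layer class back to the enum value");

Because only a forward declaration of each layer class is needed for these traits, including LayersFwd.hpp is enough to resolve the enum/class mapping without pulling in the full layer definitions everywhere.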