ArmNN 23.05
LayersFwd.hpp
//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "InternalTypes.hpp"

#include "layers/AdditionLayer.hpp"
#include "layers/CastLayer.hpp"
#include "layers/ConcatLayer.hpp"
#include "layers/ConstantLayer.hpp"
#include "layers/DebugLayer.hpp"
#include "layers/DivisionLayer.hpp"
#include "layers/FillLayer.hpp"
#include "layers/FloorLayer.hpp"
#include "layers/GatherLayer.hpp"
#include "layers/GatherNdLayer.hpp"
#include "layers/InputLayer.hpp"
#include "layers/LstmLayer.hpp"
#include "layers/MapLayer.hpp"
#include "layers/MaximumLayer.hpp"
#include "layers/MeanLayer.hpp"
#include "layers/MemCopyLayer.hpp"
#include "layers/MergeLayer.hpp"
#include "layers/MinimumLayer.hpp"
#include "layers/OutputLayer.hpp"
#include "layers/PadLayer.hpp"
#include "layers/PermuteLayer.hpp"
#include "layers/PreluLayer.hpp"
#include "layers/QuantizeLayer.hpp"
#include "layers/QLstmLayer.hpp"
#include "layers/RankLayer.hpp"
#include "layers/ReduceLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeLayer.hpp"
#include "layers/ShapeLayer.hpp"
#include "layers/SliceLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
#include "layers/SplitterLayer.hpp"
#include "layers/StackLayer.hpp"
#include "layers/StandInLayer.hpp"
#include "layers/SwitchLayer.hpp"
#include "layers/UnmapLayer.hpp"

namespace armnn
{

// Maps a LayerType enum value to the corresponding layer class at compile time.
template <LayerType Type>
struct LayerTypeOfImpl;

template <LayerType Type>
using LayerTypeOf = typename LayerTypeOfImpl<Type>::Type;

// Maps a layer class back to its LayerType enum value at compile time.
template <typename T>
constexpr LayerType LayerEnumOf(const T* = nullptr);

#define DECLARE_LAYER_IMPL(_, LayerName) \
    class LayerName##Layer; \
    template <> \
    struct LayerTypeOfImpl<LayerType::_##LayerName> \
    { \
        using Type = LayerName##Layer; \
    }; \
    template <> \
    constexpr LayerType LayerEnumOf(const LayerName##Layer*) \
    { \
        return LayerType::_##LayerName; \
    }

#define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName)

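// Illustration added for this documentation page (not part of the original header):
// each DECLARE_LAYER invocation below forward-declares the layer class and registers
// the two-way mapping. For example, DECLARE_LAYER(Addition) expands to:
//
//     class AdditionLayer;
//     template <>
//     struct LayerTypeOfImpl<LayerType::Addition>
//     {
//         using Type = AdditionLayer;
//     };
//     template <>
//     constexpr LayerType LayerEnumOf(const AdditionLayer*)
//     {
//         return LayerType::Addition;
//     }
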
DECLARE_LAYER(Addition)
DECLARE_LAYER(BatchNormalization)
DECLARE_LAYER(ChannelShuffle)
DECLARE_LAYER(Comparison)
DECLARE_LAYER(Constant)
DECLARE_LAYER(ConvertFp16ToFp32)
DECLARE_LAYER(ConvertFp32ToFp16)
DECLARE_LAYER(Convolution2d)
DECLARE_LAYER(Convolution3d)
DECLARE_LAYER(DepthwiseConvolution2d)
DECLARE_LAYER(Division)
DECLARE_LAYER(ElementwiseBinary)
DECLARE_LAYER(ElementwiseUnary)
DECLARE_LAYER(GatherNd)
DECLARE_LAYER(InstanceNormalization)
DECLARE_LAYER(L2Normalization)
DECLARE_LAYER(LogicalBinary)
DECLARE_LAYER(MemImport)
DECLARE_LAYER(Multiplication)
DECLARE_LAYER(Normalization)
DECLARE_LAYER(PreCompiled)
DECLARE_LAYER(QuantizedLstm)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(TransposeConvolution2d)
DECLARE_LAYER(UnidirectionalSequenceLstm)

}
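
Taken together, LayerTypeOfImpl, LayerTypeOf and LayerEnumOf declare a two-way compile-time mapping between LayerType enum values and the concrete layer classes, which is used elsewhere in ArmNN to go from a layer class template parameter to its enum value and back. The sketch below is a hypothetical usage example, not part of the header; it assumes LayersFwd.hpp (an internal header under src/armnn) is on the include path.

// Hypothetical usage sketch: exercises the compile-time enum <-> class mapping
// declared in LayersFwd.hpp. Not part of ArmNN itself.
#include "LayersFwd.hpp"

#include <type_traits>

namespace
{
using namespace armnn;

// LayerTypeOf maps an enum value to the layer class...
static_assert(std::is_same<LayerTypeOf<LayerType::Addition>, AdditionLayer>::value,
              "LayerTypeOf<LayerType::Addition> should be AdditionLayer");

// ...and LayerEnumOf maps the layer class back to the enum value.
static_assert(LayerEnumOf<AdditionLayer>() == LayerType::Addition,
              "LayerEnumOf for AdditionLayer should yield LayerType::Addition");
}

Because both directions are resolved at compile time, a mismatch between a layer class and its LayerType value fails the build rather than surfacing at runtime.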