23.05
|
Go to the documentation of this file.
24 bool checkDataTypeInputandOutput(
const Layer& layer)
26 auto inputInfo = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
27 auto outputInfo = layer.GetOutputSlot(0).GetTensorInfo();
28 bool sameDataType = (inputInfo.GetDataType() == outputInfo.GetDataType());
35 bool sameScale = (inputInfo.GetQuantizationScale() == outputInfo.GetQuantizationScale());
36 bool sameOffset = (inputInfo.GetQuantizationOffset() == outputInfo.GetQuantizationOffset());
38 return (sameScale && sameOffset);
53 template<
typename LayerType>
60 replacementLayer->SetAdditionalInfoForObject(
61 std::make_shared<ActivationDescriptor>(activationDesc));
63 SubgraphView substitutionSubgraph({baseLayer, activationLayer},
64 CreateIInputsFrom({baseLayer}),
65 CreateIOutputsFrom({activationLayer}));
68 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
70 return replacementLayer;
73 template<
typename LayerType>
83 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
91 return replacementLayer;
94 template<
typename LayerType>
104 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
112 return replacementLayer;
115 template<
typename LayerType>
125 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
133 return replacementLayer;
136 template<
typename LayerType>
146 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
154 return replacementLayer;
157 template<
typename LayerType>
167 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
175 return replacementLayer;
178 template<
typename LayerType>
192 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
200 SubgraphView substitutionSubgraph({baseLayer, activationLayer},
201 CreateIInputsFrom({baseLayer}),
202 CreateIOutputsFrom({activationLayer}));
205 return replacementLayer;
208 template<
typename LayerType>
218 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
227 return replacementLayer;
230 template<
typename LayerType>
240 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
249 return replacementLayer;
252 template<
typename LayerType>
262 LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
271 return replacementLayer;
278 template<
typename LayerType>
284 std::vector<IConnectableLayer*> layers;
287 std::vector<uint32_t> axes;
288 unsigned int recalulatedAxis = 0;
290 for (
unsigned int i = 0; i != desc.
m_vAxis.size(); ++i)
293 TensorInfo layerInfo = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
295 axes.emplace_back(desc.
m_vAxis[i]);
303 std::vector<uint32_t> singleAxis(1, desc.
m_vAxis[i] - recalulatedAxis);
307 newReduceDescriptor.
m_vAxis.assign(singleAxis.begin(), singleAxis.end());
310 std::string layerName =
"reduce_layer_" + std::to_string(i);
312 Layer* replacementLayer = PolymorphicDowncast<Layer*>(
320 layers[i - 1]->GetOutputSlot(0).Connect(replacementLayer->
GetInputSlot(0));
331 layers.emplace_back(replacementLayer);
335 ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() ==
336 PolymorphicDowncast<Layer*>(layers.back())->GetOutputSlot().GetTensorInfo());
344 template<
typename LayerType>
347 std::vector<IConnectableLayer*>& layers)
349 std::list<IConnectableLayer*> replacementLayers(layers.begin(), layers.end());
352 SubgraphView replacementSubgraph(std::move(replacementLayers),
353 CreateIInputsFrom({replacementLayers.front()}),
354 CreateIOutputsFrom({replacementLayers.back()}));
356 optimizationViews.
AddSubstitution({substitutionSubgraph, replacementSubgraph});
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D convolution layer to the network.
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Adds a 2D depthwise convolution layer to the network.
LayerType * FuseLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, LayerType *replacementLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc)
LayerType * FuseMultiplicationLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
IConnectableLayer * AddDivisionLayer(const char *name=nullptr)
Adds a division layer to the network.
LayerType * FuseSubtractionLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
An ActivationDescriptor for the ActivationLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
void SetTensorInfo(const TensorInfo &tensorInfo) override
constexpr bool IsQuantizedType()
IConnectableLayer * AddSubtractionLayer(const char *name=nullptr)
Adds a subtraction layer to the network.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
A ReduceDescriptor for the REDUCE operators.
IConnectableLayer * AddElementwiseBinaryLayer(const ElementwiseBinaryDescriptor &elementwiseUnaryDescriptor, const char *name=nullptr)
Adds an ElementwiseBinary layer to the network.
LayerType * FuseDivisionLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
void AddSubstitution(SubstitutionPair &&substitution)
const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo &input, const std::vector< uint32_t > &vAxis, const bool keepDims)
Computes the output tensor shape from the reduction axes and the keepDims flag.
This layer represents an activation operation with the specified activation function.
LayerType * FuseFullyConnectedLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
IConnectableLayer * AddReduceLayer(const ReduceDescriptor &reduceDescriptor, const char *name=nullptr)
Adds a reduce layer to the network.
Copyright (c) 2021 ARM Limited and Contributors.
IConnectableLayer * AddMultiplicationLayer(const char *name=nullptr)
Adds a multiplication layer to the network.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
void ReplaceLayers(OptimizationViews &optimizationViews, LayerType *baseLayer, std::vector< IConnectableLayer * > &layers)
IConnectableLayer * AddAdditionLayer(const char *name=nullptr)
Adds an addition layer to the network.
LayerType * FuseConvolution2dLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
LayerType * FuseBatchNormalizationLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
The SubgraphView class represents a subgraph of a Graph.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
LayerType * FuseDepthwiseConvolution2dLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_ASSERT(COND)
LayerType * FuseAdditionLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, std::string name)
bool m_KeepDims
If true, the output tensor keeps the same number of dimensions as the input (reduced dimensions are retained with size 1).
LayerType * FuseElementwiseBinaryLayer(OptimizationViews &optimizationViews, LayerType *baseLayer, ActivationLayer *activationLayer, ActivationDescriptor &activationDesc, BinaryOperation operation, std::string name)
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)
Adds a batch normalization layer to the network.
std::vector< IConnectableLayer * > ChainReduceLayers(OptimizationViews &optimizationViews, LayerType *baseLayer, ReduceDescriptor &desc)
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const char *name=nullptr)
Adds a fully connected layer to the network.
#define ARMNN_NO_DEPRECATE_WARN_END