ArmNN
 21.02
BatchNormalizationLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "BatchNormalizationLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

namespace armnn
{

BatchNormalizationLayer::BatchNormalizationLayer(const armnn::BatchNormalizationDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::BatchNormalization, param, name)
{
}

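// Makes a workload for the BatchNormalization type.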
std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    // At this level the constant data should not have been released.
    ARMNN_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
    ARMNN_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
    ARMNN_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
    ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");

    BatchNormalizationQueueDescriptor descriptor;
    SetAdditionalInfo(descriptor);

    descriptor.m_Mean = m_Mean.get();
    descriptor.m_Variance = m_Variance.get();
    descriptor.m_Beta = m_Beta.get();
    descriptor.m_Gamma = m_Gamma.get();

    return factory.CreateBatchNormalization(descriptor, PrepInfoAndDesc(descriptor));
}

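// Creates a dynamically-allocated copy of this layer, duplicating the constant tensors it holds.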
BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<BatchNormalizationLayer>(graph, m_Param, GetName());

    layer->m_Mean = m_Mean ? std::make_unique<ScopedCpuTensorHandle>(*m_Mean) : nullptr;
    layer->m_Variance = m_Variance ? std::make_unique<ScopedCpuTensorHandle>(*m_Variance) : nullptr;
    layer->m_Beta = m_Beta ? std::make_unique<ScopedCpuTensorHandle>(*m_Beta) : nullptr;
    layer->m_Gamma = m_Gamma ? std::make_unique<ScopedCpuTensorHandle>(*m_Gamma) : nullptr;

    return std::move(layer);
}

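// Check if the input tensor shape(s) will lead to a valid configuration of BatchNormalizationLayer.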
void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchNormalizationLayer");
}

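// Retrieve the handles to the constant values stored by the layer.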
Layer::ConstantTensors BatchNormalizationLayer::GetConstantTensorsByRef()
{
    return {m_Mean, m_Variance, m_Beta, m_Gamma};
}

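// Apply a visitor to this layer.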
void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
{
    ConstTensor meanTensor(m_Mean->GetTensorInfo(), m_Mean->Map(true));
    ConstTensor varianceTensor(m_Variance->GetTensorInfo(), m_Variance->Map(true));
    ConstTensor betaTensor(m_Beta->GetTensorInfo(), m_Beta->Map(true));
    ConstTensor gammaTensor(m_Gamma->GetTensorInfo(), m_Gamma->Map(true));
    visitor.VisitBatchNormalizationLayer(
        this, GetParameters(), meanTensor, varianceTensor, betaTensor, gammaTensor, GetName());
}

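// Apply a strategy to this layer.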
void BatchNormalizationLayer::ExecuteStrategy(IStrategy& strategy) const
{
    std::vector<armnn::ConstTensor> constTensors { {m_Mean->GetTensorInfo(), m_Mean->Map(true)},
                                                   {m_Variance->GetTensorInfo(), m_Variance->Map(true)},
                                                   {m_Beta->GetTensorInfo(), m_Beta->Map(true)},
                                                   {m_Gamma->GetTensorInfo(), m_Gamma->Map(true)} };

    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}

} // namespace armnn
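For context, below is a minimal sketch of how an application typically adds a batch normalization layer through the public INetwork API. The descriptor values, tensor shapes, constant data, and the "batchNorm" name are illustrative assumptions for this sketch and are not taken from this file.

#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Per-channel batch-norm constants; a real model would load these from trained weights.
    std::vector<float> mean     = {0.0f};
    std::vector<float> variance = {1.0f};
    std::vector<float> beta     = {0.0f};
    std::vector<float> gamma    = {1.0f};

    TensorInfo constInfo(TensorShape({1}), DataType::Float32);
    ConstTensor meanTensor(constInfo, mean.data());
    ConstTensor varianceTensor(constInfo, variance.data());
    ConstTensor betaTensor(constInfo, beta.data());
    ConstTensor gammaTensor(constInfo, gamma.data());

    BatchNormalizationDescriptor desc;
    desc.m_Eps        = 0.0001f;
    desc.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* input     = network->AddInputLayer(0);
    IConnectableLayer* batchNorm = network->AddBatchNormalizationLayer(desc, meanTensor, varianceTensor,
                                                                       betaTensor, gammaTensor, "batchNorm");
    IConnectableLayer* output    = network->AddOutputLayer(0);

    // 1 batch, 2x2 spatial, 1 channel (NHWC); the per-channel constants above match the single channel.
    TensorInfo activationInfo(TensorShape({1, 2, 2, 1}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(activationInfo);
    batchNorm->GetOutputSlot(0).SetTensorInfo(activationInfo);

    input->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}

When such a network is later optimized and loaded onto a backend, the CreateWorkload() implementation above is what turns the layer and its four constant tensors into an executable workload.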