//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "StaticRangeVisitor.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>

#include <boost/core/ignore_unused.hpp>

#include <algorithm>
#include <limits>
13 
14 namespace armnn
15 {
16 
18  : m_RangeTracker(rangeTracker)
19 {}
20 
21 void StaticRangeVisitor::SetRange(const IConnectableLayer* layer, unsigned int outputIdx, float min, float max)
22 {
23  m_RangeTracker.SetRange(layer, outputIdx, min, max);
24 }
25 
26 void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer)
27 {
28  const auto parentRange = m_RangeTracker.GetRange(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), 0);
29  SetRange(layer, 0, parentRange.first, parentRange.second);
30 }
31 
32 void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
33 {
34  boost::ignore_unused(name);
35  SetRange(layer, 0, -20.f, 20.f);
36 }
37 
39  const BatchNormalizationDescriptor& desc,
40  const ConstTensor& mean,
41  const ConstTensor& variance,
42  const ConstTensor& beta,
43  const ConstTensor& gamma,
44  const char* name)
45 {
46  boost::ignore_unused(desc);
47  boost::ignore_unused(mean);
48  boost::ignore_unused(variance);
49  boost::ignore_unused(beta);
50  boost::ignore_unused(gamma);
51  boost::ignore_unused(name);
52  SetRange(layer, 0, -15.0f, 15.0f);
53 }
54 
56  const Convolution2dDescriptor& convolution2dDescriptor,
57  const ConstTensor& weights,
58  const Optional<ConstTensor>& biases,
59  const char* name)
60 {
61  boost::ignore_unused(convolution2dDescriptor);
62  boost::ignore_unused(weights);
63  boost::ignore_unused(biases);
64  boost::ignore_unused(name);
65  SetRange(layer, 0, -15.0f, 15.0f);
66 }
67 
70  const ConstTensor& weights,
71  const Optional<ConstTensor>& biases,
72  const char* name)
73 {
74  boost::ignore_unused(desc);
75  boost::ignore_unused(weights);
76  boost::ignore_unused(biases);
77  boost::ignore_unused(name);
78  SetRange(layer, 0, -15.0f, 15.0f);
79 }
80 
82  const ActivationDescriptor& activationDescriptor,
83  const char* name)
84 {
85  boost::ignore_unused(name);
86  switch (activationDescriptor.m_Function)
87  {
88  // Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
93  SetRange(layer, 0, 0.f, 15.f);
94  break;
96  SetRange(layer, 0, 0.f, activationDescriptor.m_A);
97  break;
99  SetRange(layer, 0, -1.f, 1.f);
100  break;
102  SetRange(layer, 0, -5.f, 15.f);
103  break;
104  default:
105  SetRange(layer, 0, -15.f, 15.f);
106  break;
107  }
108 }
109 
111  const FullyConnectedDescriptor& desc,
112  const ConstTensor& weights,
113  const Optional<ConstTensor>& biases,
114  const char *name)
115 {
116  boost::ignore_unused(desc);
117  boost::ignore_unused(weights);
118  boost::ignore_unused(biases);
119  boost::ignore_unused(name);
120  SetRange(layer, 0, -15.0f, 15.0f);
121 }
122 
124  const PermuteDescriptor& permuteDescriptor,
125  const char* name)
126 {
127  boost::ignore_unused(permuteDescriptor);
128  boost::ignore_unused(name);
129  ForwardParentParameters(layer);
130 }
131 
133  const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
134  const char* name)
135 {
136  boost::ignore_unused(spaceToBatchNdDescriptor);
137  boost::ignore_unused(name);
138  ForwardParentParameters(layer);
139 }
140 
142  const Pooling2dDescriptor& pooling2dDescriptor,
143  const char* name)
144 {
145  boost::ignore_unused(pooling2dDescriptor);
146  boost::ignore_unused(name);
147  ForwardParentParameters(layer);
148 }
149 
151  const SoftmaxDescriptor& softmaxDescriptor,
152  const char* name)
153 {
154  boost::ignore_unused(softmaxDescriptor);
155  boost::ignore_unused(name);
156  SetRange(layer, 0, 0.f, 1.f);
157 }
158 
160  const OriginsDescriptor& originsDescriptor,
161  const char* name)
162 {
163  boost::ignore_unused(originsDescriptor);
164  boost::ignore_unused(name);
165  float min = std::numeric_limits<float>::max();
166  float max = std::numeric_limits<float>::lowest();
167  for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
168  {
169  const IOutputSlot* outputSlot = layer->GetInputSlot(i).GetConnection();
170  LayerGuid layerId = outputSlot->GetOwningLayerGuid();
171  unsigned int slotIndex = outputSlot->CalculateIndexOnOwner();
172  RangeTracker::MinMaxRange range = m_RangeTracker.GetRange(layerId, slotIndex);
173  min = std::min(min, range.first);
174  max = std::max(max, range.second);
175  }
176  SetRange(layer, 0, min, max);
177 }
178 
180  const ConstTensor& input,
181  const char* name)
182 {
183  boost::ignore_unused(name);
184 
185  if (input.GetDataType() != DataType::Float32)
186  {
187  throw InvalidArgumentException("Quantization is supported only for FP32 tensors");
188  }
189 
190  // Work out the range based on the input constants
191  unsigned int inputNumElements = input.GetNumElements();
192  const float* inputData = reinterpret_cast<const float*>(input.GetMemoryArea());
193 
194  float min = std::numeric_limits<float>::max();
195  float max = std::numeric_limits<float>::lowest();
196 
197  for (unsigned int i = 0; i < inputNumElements; i++)
198  {
199  const float inputValue = inputData[i];
200 
201  min = std::min(min, inputValue);
202  max = std::max(max, inputValue);
203  }
204  SetRange(layer, 0, min, max);
205 }
206 
208  const ReshapeDescriptor& reshapeDescriptor,
209  const char* name)
210 {
211  boost::ignore_unused(reshapeDescriptor);
212  boost::ignore_unused(name);
213  ForwardParentParameters(layer);
214 }
215 
217  const SplitterDescriptor& splitterDescriptor,
218  const char* name)
219 {
220  boost::ignore_unused(splitterDescriptor);
221  boost::ignore_unused(name);
222  ForwardParentParameters(layer);
223 }
224 
226  const ResizeBilinearDescriptor& resizeDesc,
227  const char* name)
228 {
229  boost::ignore_unused(resizeDesc);
230  boost::ignore_unused(name);
231  ForwardParentParameters(layer);
232 }
233 
235  const ResizeDescriptor& resizeDescriptor,
236  const char* name)
237 {
238  boost::ignore_unused(resizeDescriptor);
239  boost::ignore_unused(name);
240  ForwardParentParameters(layer);
241 }
242 
244  const StridedSliceDescriptor& stridedSliceDescriptor,
245  const char* name)
246 {
247  boost::ignore_unused(stridedSliceDescriptor);
248  boost::ignore_unused(name);
249  ForwardParentParameters(layer);
250 }
251 
253  const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
254  const char* name)
255 {
256  boost::ignore_unused(batchToSpaceNdDescriptor);
257  boost::ignore_unused(name);
258  ForwardParentParameters(layer);
259 }
260 
261 } //namespace armnn
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:37
virtual const IOutputSlot * GetConnection() const =0
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
DataType GetDataType() const
Definition: Tensor.hpp:172
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:199
void VisitReshapeLayer(const IConnectableLayer *layer, const ReshapeDescriptor &reshapeDescriptor, const char *name=nullptr) override
void VisitActivationLayer(const IConnectableLayer *layer, const ActivationDescriptor &activationDescriptor, const char *name=nullptr) override
unsigned int GetNumElements() const
Definition: Tensor.hpp:175
void VisitStridedSliceLayer(const IConnectableLayer *layer, const StridedSliceDescriptor &stridedSliceDescriptor, const char *name=nullptr) override
void VisitDepthwiseConvolution2dLayer(const IConnectableLayer *layer, const DepthwiseConvolution2dDescriptor &desc, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr) override
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
std::pair< float, float > MinMaxRange
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
void VisitPermuteLayer(const IConnectableLayer *layer, const PermuteDescriptor &permuteDescriptor, const char *name) override
void VisitSoftmaxLayer(const IConnectableLayer *layer, const SoftmaxDescriptor &softmaxDescriptor, const char *name=nullptr) override
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
void VisitResizeLayer(const IConnectableLayer *layer, const ResizeDescriptor &resizeDescriptor, const char *name=nullptr) override
void VisitBatchToSpaceNdLayer(const IConnectableLayer *layer, const BatchToSpaceNdDescriptor &batchToSpaceNdDescriptor, const char *name=nullptr) override
virtual LayerGuid GetOwningLayerGuid() const =0
void VisitSpaceToBatchNdLayer(const IConnectableLayer *layer, const SpaceToBatchNdDescriptor &spaceToBatchNdDescriptor, const char *name=nullptr) override
void VisitFullyConnectedLayer(const IConnectableLayer *layer, const FullyConnectedDescriptor &desc, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name) override
A ReshapeDescriptor for the ReshapeLayer.
void VisitConvolution2dLayer(const IConnectableLayer *layer, const Convolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr) override
void VisitResizeBilinearLayer(const IConnectableLayer *layer, const ResizeBilinearDescriptor &resizeDesc, const char *name=nullptr) override
void VisitConstantLayer(const IConnectableLayer *layer, const ConstTensor &input, const char *name=nullptr) override
void VisitAdditionLayer(const IConnectableLayer *layer, const char *name=nullptr) override
Functions to set the Range on a per-layer-type basis.
A FullyConnectedDescriptor for the FullyConnectedLayer.
void VisitConcatLayer(const IConnectableLayer *layer, const OriginsDescriptor &originsDescriptor, const char *name=nullptr) override
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
Definition: Descriptors.hpp:35
A SoftmaxDescriptor for the SoftmaxLayer.
An output connection slot for a layer. The output slot may be connected to 1 or more input slots of s...
Definition: INetwork.hpp:37
void SetRange(const IConnectableLayer *layer, unsigned int outputIdx, float min, float max)
Set the range for an output slot on a layer.
void VisitSplitterLayer(const IConnectableLayer *layer, const SplitterDescriptor &splitterDescriptor, const char *name=nullptr) override
void VisitPooling2dLayer(const IConnectableLayer *layer, const Pooling2dDescriptor &pooling2dDescriptor, const char *name) override
MinMaxRange GetRange(LayerGuid guid, unsigned int idx) const
Retrieve the Range for a particular output slot on a particular layer.
StaticRangeVisitor(RangeTracker &rangeTracker)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:177
A Pooling2dDescriptor for the Pooling2dLayer.
virtual unsigned int GetNumInputSlots() const =0
void VisitBatchNormalizationLayer(const IConnectableLayer *layer, const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr) override
virtual unsigned int CalculateIndexOnOwner() const =0
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A PermuteDescriptor for the PermuteLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
A ResizeDescriptor for the ResizeLayer.
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process...
A StridedSliceDescriptor for the StridedSliceLayer.