ArmNN
 22.08
LayerReleaseConstantDataTest.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include <CommonTestUtils.hpp>

#include <Graph.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>

#include <doctest/doctest.h>

#include <utility>
16 
17 using namespace armnn;
18 using namespace std;
19 
20 /////////////////////////////////////////////////////////////////////////////////////////////
21 // The following tests are created specifically to test the ReleaseConstantData() method in the Layer.
22 // They build very simple graphs including the layer that will be checked.
23 // Checks weights and biases before the method is called and after.
24 /////////////////////////////////////////////////////////////////////////////////////////////
25 
26 TEST_SUITE("LayerReleaseConstantDataTest")
27 {
28 TEST_CASE("ReleaseBatchNormalizationLayerConstantDataTest")
29 {
30  Graph graph;
31 
32  // create the layer we're testing
34  layerDesc.m_Eps = 0.05f;
35  BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
36 
38  layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
39  layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
40  layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
41  layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
42  layer->m_Mean->Allocate();
43  layer->m_Variance->Allocate();
44  layer->m_Beta->Allocate();
45  layer->m_Gamma->Allocate();
46 
47  // create extra layers
48  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
49  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
50 
51  // connect up
52  armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
53  Connect(input, layer, tensorInfo);
54  Connect(layer, output, tensorInfo);
55 
56  // check the constants that they are not NULL
57  CHECK(layer->m_Mean != nullptr);
58  CHECK(layer->m_Variance != nullptr);
59  CHECK(layer->m_Beta != nullptr);
60  CHECK(layer->m_Gamma != nullptr);
61 
62  // free up the constants..
63  layer->ReleaseConstantData();
64 
65  // check the constants that they are NULL now
66  CHECK(layer->m_Mean == nullptr);
67  CHECK(layer->m_Variance == nullptr);
68  CHECK(layer->m_Beta == nullptr);
69  CHECK(layer->m_Gamma == nullptr);
70 
71  }
72 
73 TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
74 {
75  Graph graph;
76 
77  // create the layer we're testing
78  Convolution2dDescriptor layerDesc;
79  layerDesc.m_PadLeft = 3;
80  layerDesc.m_PadRight = 3;
81  layerDesc.m_PadTop = 1;
82  layerDesc.m_PadBottom = 1;
83  layerDesc.m_StrideX = 2;
84  layerDesc.m_StrideY = 4;
85  layerDesc.m_BiasEnabled = true;
86 
87  Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
88 
89  layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
91  layer->m_Bias = std::make_unique<ScopedTensorHandle>
93 
94  layer->m_Weight->Allocate();
95  layer->m_Bias->Allocate();
96 
97  ConstantLayer* weightsLayer = graph.AddLayer<ConstantLayer>("Weights");
98  ConstantLayer* biasLayer = graph.AddLayer<ConstantLayer>("Bias");
99 
100  weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(TensorInfo({ 2, 3, 5, 3 },
102 
103  biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(
105 
106  TensorInfo weightsInfo = weightsLayer->m_LayerOutput->GetTensorInfo();
107  weightsInfo.SetConstant();
108  TensorInfo biasInfo = biasLayer->m_LayerOutput->GetTensorInfo();
109  biasInfo.SetConstant();
110 
111  weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
112  biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);
113 
114  // create extra layers
115  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
116  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
117 
118  // connect up
119  Connect(input, layer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
120  weightsLayer->GetOutputSlot().Connect(layer->GetInputSlot(1));
121  biasLayer->GetOutputSlot().Connect(layer->GetInputSlot(2));
122  Connect(layer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));
123 
124  // check the constants that they are not NULL
125  CHECK(weightsLayer->m_LayerOutput != nullptr);
126  CHECK(biasLayer->m_LayerOutput != nullptr);
127 
128  // free up the constants..
129  layer->ReleaseConstantData();
130 
131  // check the constants that they are NULL now
132  CHECK(weightsLayer->m_LayerOutput == nullptr);
133  CHECK(biasLayer->m_LayerOutput == nullptr);
134 }
135 
136 TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
137 {
138  Graph graph;
139 
140  // create the layer we're testing
142  layerDesc.m_PadLeft = 3;
143  layerDesc.m_PadRight = 3;
144  layerDesc.m_PadTop = 1;
145  layerDesc.m_PadBottom = 1;
146  layerDesc.m_StrideX = 2;
147  layerDesc.m_StrideY = 4;
148  layerDesc.m_BiasEnabled = true;
149 
150  DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
151 
152  layer->m_Weight = std::make_unique<ScopedTensorHandle>(
153  TensorInfo({3, 3, 5, 3}, DataType::Float32));
154  layer->m_Bias = std::make_unique<ScopedTensorHandle>(
156  layer->m_Weight->Allocate();
157  layer->m_Bias->Allocate();
158 
159  // create extra layers
160  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
161  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
162 
163  // connect up
164  Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
165  Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
166 
167  // check the constants that they are not NULL
168  CHECK(layer->m_Weight != nullptr);
169  CHECK(layer->m_Bias != nullptr);
170 
171  // free up the constants..
172  layer->ReleaseConstantData();
173 
174  // check the constants that they are NULL now
175  CHECK(layer->m_Weight == nullptr);
176  CHECK(layer->m_Bias == nullptr);
177 }
178 
179 TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
180 {
181  Graph graph;
182 
183  // create the layer we're testing
184  FullyConnectedDescriptor layerDesc;
185  layerDesc.m_BiasEnabled = true;
186  layerDesc.m_TransposeWeightMatrix = true;
187 
188  FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
189 
190  float inputsQScale = 1.0f;
191  float outputQScale = 2.0f;
192 
193  layer->m_Weight = std::make_unique<ScopedTensorHandle>(
194  TensorInfo({7, 20}, DataType::QAsymmU8, inputsQScale, 0));
195  layer->m_Bias = std::make_unique<ScopedTensorHandle>(
196  TensorInfo({7}, GetBiasDataType(DataType::QAsymmU8), inputsQScale));
197  layer->m_Weight->Allocate();
198  layer->m_Bias->Allocate();
199 
200  // create extra layers
201  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
202  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
203 
204  // connect up
205  Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
206  Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
207 
208  // check the constants that they are not NULL
209  CHECK(layer->m_Weight != nullptr);
210  CHECK(layer->m_Bias != nullptr);
211 
212  // free up the constants..
213  layer->ReleaseConstantData();
214 
215  // check the constants that they are NULL now
216  CHECK(layer->m_Weight == nullptr);
217  CHECK(layer->m_Bias == nullptr);
218 }
219 
220 }
221 
TEST_SUITE("TestConstTensorLayerVisitor")
A layer that the constant data can be bound to.
virtual void ReleaseConstantData()
Definition: Layer.cpp:304
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
This layer represents a batch normalization operation.
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
This layer represents a depthwise convolution 2d operation.
std::shared_ptr< ConstTensorHandle > m_LayerOutput
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:456
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
int Connect(InputSlot &destination)
Definition: Layer.cpp:112
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
std::shared_ptr< ConstTensorHandle > m_Mean
A unique pointer to store Mean values.
uint32_t m_PadRight
Padding right value in the width dimension.
Copyright (c) 2021 ARM Limited and Contributors.
std::shared_ptr< ConstTensorHandle > m_Beta
A unique pointer to store Beta values.
uint32_t m_PadTop
Padding top value in the height dimension.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:324
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
This layer represents a fully connected operation.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
uint32_t m_PadTop
Padding top value in the height dimension.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
std::shared_ptr< ConstTensorHandle > m_Bias
A unique pointer to store Bias values.
std::shared_ptr< ConstTensorHandle > m_Gamma
A unique pointer to store Gamma values.
std::shared_ptr< ConstTensorHandle > m_Variance
A unique pointer to store Variance values.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::shared_ptr< ConstTensorHandle > m_Bias
A unique pointer to store Bias values.
DataType GetBiasDataType(DataType inputDataType)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::shared_ptr< ConstTensorHandle > m_Bias
A unique pointer to store Bias values.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:87
void SetConstant(const bool IsConstant=true)
Marks the data corresponding to this tensor info as constant.
Definition: Tensor.cpp:514
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:326
This layer represents a convolution 2d operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:14
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_PadRight
Padding right value in the width dimension.