//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "CommonTestUtils.hpp"

#include <Graph.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <boost/cast.hpp>
#include <boost/test/unit_test.hpp>

#include <utility>

18 using namespace armnn;
19 using namespace std;
20 
/////////////////////////////////////////////////////////////////////////////////////////////
// The following tests are created specifically to exercise the ReleaseConstantData() method
// of Layer. Each test builds a very simple graph containing the layer to be checked, and
// verifies the weights and biases both before and after the method is called.
/////////////////////////////////////////////////////////////////////////////////////////////
26 
27 BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
28 
29 BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
30 {
31  Graph graph;
32 
33  // create the layer we're testing
35  layerDesc.m_Eps = 0.05f;
36  BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
37 
39  layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
40  layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
41  layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
42  layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
43  layer->m_Mean->Allocate();
44  layer->m_Variance->Allocate();
45  layer->m_Beta->Allocate();
46  layer->m_Gamma->Allocate();
47 
48  // create extra layers
49  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
50  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
51 
52  // connect up
53  armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
54  Connect(input, layer, tensorInfo);
55  Connect(layer, output, tensorInfo);
56 
57  // check the constants that they are not NULL
58  BOOST_CHECK(layer->m_Mean != nullptr);
59  BOOST_CHECK(layer->m_Variance != nullptr);
60  BOOST_CHECK(layer->m_Beta != nullptr);
61  BOOST_CHECK(layer->m_Gamma != nullptr);
62 
63  // free up the constants..
64  layer->ReleaseConstantData();
65 
66  // check the constants that they are NULL now
67  BOOST_CHECK(layer->m_Mean == nullptr);
68  BOOST_CHECK(layer->m_Variance == nullptr);
69  BOOST_CHECK(layer->m_Beta == nullptr);
70  BOOST_CHECK(layer->m_Gamma == nullptr);
71 
72  }
73 
74 
75  BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
76  {
77  Graph graph;
78 
79  // create the layer we're testing
80  Convolution2dDescriptor layerDesc;
81  layerDesc.m_PadLeft = 3;
82  layerDesc.m_PadRight = 3;
83  layerDesc.m_PadTop = 1;
84  layerDesc.m_PadBottom = 1;
85  layerDesc.m_StrideX = 2;
86  layerDesc.m_StrideY = 4;
87  layerDesc.m_BiasEnabled = true;
88 
89  Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
90 
91  layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
93  layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
95 
96  layer->m_Weight->Allocate();
97  layer->m_Bias->Allocate();
98 
99  // create extra layers
100  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
101  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
102 
103  // connect up
104  Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
105  Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));
106 
107  // check the constants that they are not NULL
108  BOOST_CHECK(layer->m_Weight != nullptr);
109  BOOST_CHECK(layer->m_Bias != nullptr);
110 
111  // free up the constants..
112  layer->ReleaseConstantData();
113 
114  // check the constants that they are NULL now
115  BOOST_CHECK(layer->m_Weight == nullptr);
116  BOOST_CHECK(layer->m_Bias == nullptr);
117 }
118 
119 BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
120 {
121  Graph graph;
122 
123  // create the layer we're testing
125  layerDesc.m_PadLeft = 3;
126  layerDesc.m_PadRight = 3;
127  layerDesc.m_PadTop = 1;
128  layerDesc.m_PadBottom = 1;
129  layerDesc.m_StrideX = 2;
130  layerDesc.m_StrideY = 4;
131  layerDesc.m_BiasEnabled = true;
132 
133  DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
134 
135  layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
136  layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
137  layer->m_Weight->Allocate();
138  layer->m_Bias->Allocate();
139 
140  // create extra layers
141  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
142  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
143 
144  // connect up
145  Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
146  Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
147 
148  // check the constants that they are not NULL
149  BOOST_CHECK(layer->m_Weight != nullptr);
150  BOOST_CHECK(layer->m_Bias != nullptr);
151 
152  // free up the constants..
153  layer->ReleaseConstantData();
154 
155  // check the constants that they are NULL now
156  BOOST_CHECK(layer->m_Weight == nullptr);
157  BOOST_CHECK(layer->m_Bias == nullptr);
158 }
159 
160 BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
161 {
162  Graph graph;
163 
164  // create the layer we're testing
165  FullyConnectedDescriptor layerDesc;
166  layerDesc.m_BiasEnabled = true;
167  layerDesc.m_TransposeWeightMatrix = true;
168 
169  FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
170 
171  float inputsQScale = 1.0f;
172  float outputQScale = 2.0f;
173 
174  layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
175  DataType::QAsymmU8, inputsQScale, 0));
176  layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
177  GetBiasDataType(DataType::QAsymmU8), inputsQScale));
178  layer->m_Weight->Allocate();
179  layer->m_Bias->Allocate();
180 
181  // create extra layers
182  Layer* const input = graph.AddLayer<InputLayer>(0, "input");
183  Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
184 
185  // connect up
186  Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
187  Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
188 
189  // check the constants that they are not NULL
190  BOOST_CHECK(layer->m_Weight != nullptr);
191  BOOST_CHECK(layer->m_Bias != nullptr);
192 
193  // free up the constants..
194  layer->ReleaseConstantData();
195 
196  // check the constants that they are NULL now
197  BOOST_CHECK(layer->m_Weight == nullptr);
198  BOOST_CHECK(layer->m_Bias == nullptr);
199 }
200 
202 
// NOTE(review): The text that followed here was auto-generated Doxygen
// cross-reference/tooltip residue (member descriptions such as "uint32_t m_PadLeft
// — Padding left value in the width dimension", unrelated suite names, and
// definition locations) accidentally captured by the documentation scrape.
// It is not part of the original source file and has been removed.