ArmNN 20.02
ConstTensorLayerVisitor.cpp
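Unit tests for the const-tensor layer visitors: each test case builds a descriptor and constant tensors, adds the matching layer to a Network, and calls Accept() with a test visitor that checks the layer reports back the same descriptor, tensors and (for the Named variants) layer name.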
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ConstTensorLayerVisitor.hpp"
7 #include "Network.hpp"
8 
9 #include <boost/test/unit_test.hpp>
10 
11 namespace armnn
12 {
13 
14 void TestConvolution2dLayerVisitor::CheckDescriptor(const Convolution2dDescriptor& convolution2dDescriptor)
15 {
16  BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
17  BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
18  BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
19  BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
20  BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
21  BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
22  BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
23  BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
24 }
25 
26 void TestDepthwiseConvolution2dLayerVisitor::CheckDescriptor(
27  const DepthwiseConvolution2dDescriptor& convolution2dDescriptor)
28 {
29  BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
30  BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
31  BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
32  BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
33  BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
34  BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
35  BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
36  BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
37 }
38 
39 void TestFullyConnectedLayerVistor::CheckDescriptor(const FullyConnectedDescriptor& descriptor)
40 {
41  BOOST_CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
42  BOOST_CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
43 }
44 
45 void TestBatchNormalizationLayerVisitor::CheckDescriptor(const BatchNormalizationDescriptor& descriptor)
46 {
47  BOOST_CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
48  BOOST_CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
49 }
50 
51 void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
52 {
53  BOOST_CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
54  BOOST_CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
55  BOOST_CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
56  BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
57  BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
58  BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
59 }
60 
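// Shared helper for the LSTM parameter checks below: if no tensor is expected, the actual pointer must
// also be nullptr; otherwise the actual tensor must be present and is compared against the expected one
// with CheckConstTensors (declared with the test visitor classes in the test header).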
61 void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
62  const ConstTensor* expected,
63  const ConstTensor* actual)
64 {
65  if (expected == nullptr)
66  {
67  BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
68  }
69  else
70  {
71  BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
72  if (actual != nullptr)
73  {
74  CheckConstTensors(*expected, *actual);
75  }
76  }
77 }
78 
79 void TestLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
80 {
81  CheckConstTensorPtrs("ProjectionBias", m_InputParams.m_ProjectionBias, inputParams.m_ProjectionBias);
82  CheckConstTensorPtrs("ProjectionWeights", m_InputParams.m_ProjectionWeights, inputParams.m_ProjectionWeights);
83  CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
84  CheckConstTensorPtrs("InputToInputWeights",
85  m_InputParams.m_InputToInputWeights, inputParams.m_InputToInputWeights);
86  CheckConstTensorPtrs("InputToForgetWeights",
87  m_InputParams.m_InputToForgetWeights, inputParams.m_InputToForgetWeights);
88  CheckConstTensorPtrs("InputToCellWeights", m_InputParams.m_InputToCellWeights, inputParams.m_InputToCellWeights);
89  CheckConstTensorPtrs(
90  "InputToOutputWeights", m_InputParams.m_InputToOutputWeights, inputParams.m_InputToOutputWeights);
91  CheckConstTensorPtrs(
92  "RecurrentToInputWeights", m_InputParams.m_RecurrentToInputWeights, inputParams.m_RecurrentToInputWeights);
93  CheckConstTensorPtrs(
94  "RecurrentToForgetWeights", m_InputParams.m_RecurrentToForgetWeights, inputParams.m_RecurrentToForgetWeights);
95  CheckConstTensorPtrs(
96  "RecurrentToCellWeights", m_InputParams.m_RecurrentToCellWeights, inputParams.m_RecurrentToCellWeights);
97  CheckConstTensorPtrs(
98  "RecurrentToOutputWeights", m_InputParams.m_RecurrentToOutputWeights, inputParams.m_RecurrentToOutputWeights);
99  CheckConstTensorPtrs(
100  "CellToInputWeights", m_InputParams.m_CellToInputWeights, inputParams.m_CellToInputWeights);
101  CheckConstTensorPtrs(
102  "CellToForgetWeights", m_InputParams.m_CellToForgetWeights, inputParams.m_CellToForgetWeights);
103  CheckConstTensorPtrs(
104  "CellToOutputWeights", m_InputParams.m_CellToOutputWeights, inputParams.m_CellToOutputWeights);
105  CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
106  CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
107  CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
108 }
109 
110 void TestQuantizedLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
111  const ConstTensor* expected,
112  const ConstTensor* actual)
113 {
114  if (expected == nullptr)
115  {
116  BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
117  }
118  else
119  {
120  BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
121  if (actual != nullptr)
122  {
123  CheckConstTensors(*expected, *actual);
124  }
125  }
126 }
127 
128 void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInputParams& inputParams)
129 {
130  CheckConstTensorPtrs("InputToInputWeights",
131  m_InputParams.m_InputToInputWeights,
132  inputParams.m_InputToInputWeights);
133 
134  CheckConstTensorPtrs("InputToForgetWeights",
135  m_InputParams.m_InputToForgetWeights,
136  inputParams.m_InputToForgetWeights);
137 
138  CheckConstTensorPtrs("InputToCellWeights",
139  m_InputParams.m_InputToCellWeights,
140  inputParams.m_InputToCellWeights);
141 
142  CheckConstTensorPtrs("InputToOutputWeights",
143  m_InputParams.m_InputToOutputWeights,
144  inputParams.m_InputToOutputWeights);
145 
146  CheckConstTensorPtrs("RecurrentToInputWeights",
147  m_InputParams.m_RecurrentToInputWeights,
148  inputParams.m_RecurrentToInputWeights);
149 
150  CheckConstTensorPtrs("RecurrentToForgetWeights",
151  m_InputParams.m_RecurrentToForgetWeights,
152  inputParams.m_RecurrentToForgetWeights);
153 
154  CheckConstTensorPtrs("RecurrentToCellWeights",
155  m_InputParams.m_RecurrentToCellWeights,
156  inputParams.m_RecurrentToCellWeights);
157 
158  CheckConstTensorPtrs("RecurrentToOutputWeights",
159  m_InputParams.m_RecurrentToOutputWeights,
160  inputParams.m_RecurrentToOutputWeights);
161 
162  CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
163  CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
164  CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
165  CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
166 }
167 
168 BOOST_AUTO_TEST_SUITE(TestConstTensorLayerVisitor)
169 
170 BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
171 {
172  Convolution2dDescriptor descriptor;
173  descriptor.m_PadLeft = 2;
174  descriptor.m_PadRight = 3;
175  descriptor.m_PadBottom = 1;
176  descriptor.m_PadTop = 5;
177  descriptor.m_StrideX = 2;
178  descriptor.m_StrideY = 3;
179  descriptor.m_DataLayout = DataLayout::NHWC;
180 
181  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
182  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
183  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
184 
185  TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
186 
187  Network net;
188 
189  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
190  layer->Accept(visitor);
191 }
192 
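// Every test case in this file follows the pattern above: build the expected descriptor and tensors,
// construct the matching Test*LayerVisitor, add the layer to a Network, then call layer->Accept(visitor).
// The visitor classes are declared in the test header; a rough sketch of the override that presumably
// forwards to the Check* members defined at the top of this file (the base-class helper names here are
// assumptions, not the actual API):
//
//     void VisitConvolution2dLayer(const IConnectableLayer* layer,
//                                  const Convolution2dDescriptor& convolution2dDescriptor,
//                                  const ConstTensor& weights,
//                                  const Optional<ConstTensor>& biases,
//                                  const char* name = nullptr) override
//     {
//         CheckLayerName(name);                      // assumed helper comparing against the expected name
//         CheckDescriptor(convolution2dDescriptor);  // defined earlier in this file
//         CheckConstTensors(m_Weights, weights);     // same comparison used by CheckConstTensorPtrs
//     }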
193 BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
194 {
195  const char* layerName = "Convolution2dLayer";
196  Convolution2dDescriptor descriptor;
197  descriptor.m_PadLeft = 2;
198  descriptor.m_PadRight = 3;
199  descriptor.m_PadBottom = 1;
200  descriptor.m_PadTop = 5;
201  descriptor.m_StrideX = 2;
202  descriptor.m_StrideY = 3;
203  descriptor.m_DataLayout = DataLayout::NHWC;
204 
205  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
206  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
207  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
208 
209  TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
210 
211  Network net;
212 
213  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
214  layer->Accept(visitor);
215 }
216 
217 BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
218 {
219  Convolution2dDescriptor descriptor;
220  descriptor.m_PadLeft = 2;
221  descriptor.m_PadRight = 3;
222  descriptor.m_PadBottom = 1;
223  descriptor.m_PadTop = 5;
224  descriptor.m_StrideX = 2;
225  descriptor.m_StrideY = 3;
226  descriptor.m_DataLayout = DataLayout::NHWC;
227  descriptor.m_BiasEnabled = true;
228 
229  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
230  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
231  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
232 
233  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
234  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
235  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
236  Optional<ConstTensor> optionalBiases(biases);
237 
238  TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
239 
240  Network net;
241 
242  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
243  layer->Accept(visitor);
244 }
245 
246 BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
247 {
248  const char* layerName = "Convolution2dLayer";
249  Convolution2dDescriptor descriptor;
250  descriptor.m_PadLeft = 2;
251  descriptor.m_PadRight = 3;
252  descriptor.m_PadBottom = 1;
253  descriptor.m_PadTop = 5;
254  descriptor.m_StrideX = 2;
255  descriptor.m_StrideY = 3;
256  descriptor.m_DataLayout = DataLayout::NHWC;
257  descriptor.m_BiasEnabled = true;
258 
259  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
260  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
261  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
262 
263  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
264  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
265  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
266  Optional<ConstTensor> optionalBiases(biases);
267 
268  TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
269 
270  Network net;
271 
272  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
273  layer->Accept(visitor);
274 }
275 
276 BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
277 {
278  DepthwiseConvolution2dDescriptor descriptor;
279  descriptor.m_PadLeft = 2;
280  descriptor.m_PadRight = 3;
281  descriptor.m_PadBottom = 1;
282  descriptor.m_PadTop = 5;
283  descriptor.m_StrideX = 2;
284  descriptor.m_StrideY = 3;
285  descriptor.m_DataLayout = DataLayout::NHWC;
286 
287  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
288  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
289  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
290 
291  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
292 
293  Network net;
294 
295  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
296  layer->Accept(visitor);
297 }
298 
299 BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
300 {
301  const char* layerName = "DepthwiseConvolution2dLayer";
302  DepthwiseConvolution2dDescriptor descriptor;
303  descriptor.m_PadLeft = 2;
304  descriptor.m_PadRight = 3;
305  descriptor.m_PadBottom = 1;
306  descriptor.m_PadTop = 5;
307  descriptor.m_StrideX = 2;
308  descriptor.m_StrideY = 3;
309  descriptor.m_DataLayout = DataLayout::NHWC;
310 
311  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
312  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
313  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
314 
315  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
316 
317  Network net;
318 
319  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor,
320  weights,
321  EmptyOptional(),
322  layerName);
323  layer->Accept(visitor);
324 }
325 
326 BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
327 {
328  DepthwiseConvolution2dDescriptor descriptor;
329  descriptor.m_PadLeft = 2;
330  descriptor.m_PadRight = 3;
331  descriptor.m_PadBottom = 1;
332  descriptor.m_PadTop = 5;
333  descriptor.m_StrideX = 2;
334  descriptor.m_StrideY = 3;
335  descriptor.m_DataLayout = DataLayout::NHWC;
336  descriptor.m_BiasEnabled = true;
337 
338  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
339  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
340  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
341 
342  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
343  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
344  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
345  Optional<ConstTensor> optionalBiases(biases);
346 
347  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
348 
349  Network net;
350 
351  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
352  layer->Accept(visitor);
353 }
354 
355 BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
356 {
357  const char* layerName = "DepthwiseConvolution2dLayer";
358  DepthwiseConvolution2dDescriptor descriptor;
359  descriptor.m_PadLeft = 2;
360  descriptor.m_PadRight = 3;
361  descriptor.m_PadBottom = 1;
362  descriptor.m_PadTop = 5;
363  descriptor.m_StrideX = 2;
364  descriptor.m_StrideY = 3;
365  descriptor.m_DataLayout = DataLayout::NHWC;
366  descriptor.m_BiasEnabled = true;
367 
368  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
369  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
370  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
371 
372  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
373  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
374  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
375  Optional<ConstTensor> optionalBiases(biases);
376 
377  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
378 
379  Network net;
380 
381  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
382  layer->Accept(visitor);
383 }
384 
385 BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
386 {
387  FullyConnectedDescriptor descriptor;
388  descriptor.m_TransposeWeightMatrix = true;
389 
390  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
391  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
392  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
393 
394  TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional());
395 
396  Network net;
397 
398  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional());
399  layer->Accept(visitor);
400 }
401 
402 BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
403 {
404  const char* layerName = "FullyConnectedLayer";
405  FullyConnectedDescriptor descriptor;
406  descriptor.m_TransposeWeightMatrix = true;
407 
408  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
409  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
410  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
411 
412  TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional(), layerName);
413 
414  Network net;
415 
416  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional(), layerName);
417  layer->Accept(visitor);
418 }
419 
420 BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
421 {
422  FullyConnectedDescriptor descriptor;
423  descriptor.m_TransposeWeightMatrix = true;
424  descriptor.m_BiasEnabled = true;
425 
426  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
427  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
428  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
429 
430  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
431  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
432  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
433  Optional<ConstTensor> optionalBiases(biases);
434 
435  TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases);
436 
437  Network net;
438 
439  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases);
440  layer->Accept(visitor);
441 }
442 
443 BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
444 {
445  const char* layerName = "FullyConnectedLayer";
446  FullyConnectedDescriptor descriptor;
447  descriptor.m_TransposeWeightMatrix = true;
448  descriptor.m_BiasEnabled = true;
449 
450  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
451  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
452  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32), data);
453 
454  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
455  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
456  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32), biasData);
457  Optional<ConstTensor> optionalBiases(biases);
458 
459  TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases, layerName);
460 
461  Network net;
462 
463  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases, layerName);
464  layer->Accept(visitor);
465 }
466 
467 BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
468 {
469  BatchNormalizationDescriptor descriptor;
470  descriptor.m_Eps = 0.0002f;
471  descriptor.m_DataLayout = DataLayout::NHWC;
472 
473  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
474  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
475  ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
476 
477  std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
478  std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
479  ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
480 
481  std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
482  std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
483  ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
484 
485  std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
486  std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
487  ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
488 
489  TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
490 
491  Network net;
492 
493  IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
494  layer->Accept(visitor);
495 }
496 
497 BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
498 {
499  const char* layerName = "BatchNormalizationLayer";
500  BatchNormalizationDescriptor descriptor;
501  descriptor.m_Eps = 0.0002f;
502  descriptor.m_DataLayout = DataLayout::NHWC;
503 
504  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
505  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
506  ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32), data);
507 
508  std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
509  std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
510  ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32), varianceData);
511 
512  std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
513  std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
514  ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32), betaData);
515 
516  std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
517  std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
518  ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32), gammaData);
519 
520  TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
521 
522  Network net;
523 
524  IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
525  descriptor, mean, variance, beta, gamma, layerName);
526  layer->Accept(visitor);
527 }
528 
529 BOOST_AUTO_TEST_CASE(CheckConstLayer)
530 {
531  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
532  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
533  ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
534 
535  TestConstantLayerVisitor visitor(input);
536 
537  Network net;
538 
539  IConnectableLayer* const layer = net.AddConstantLayer(input);
540  layer->Accept(visitor);
541 }
542 
543 BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
544 {
545  const char* layerName = "ConstantLayer";
546  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
547  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
548  ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32), data);
549 
550  TestConstantLayerVisitor visitor(input, layerName);
551 
552  Network net;
553 
554  IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
555  layer->Accept(visitor);
556 }
557 
558 BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
559 {
560  LstmDescriptor descriptor;
561  descriptor.m_ActivationFunc = 3;
562  descriptor.m_ClippingThresProj = 0.5f;
563  descriptor.m_ClippingThresCell = 0.3f;
564  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
565 
566  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
567  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
568  ConstTensor inputToForgetWeights(
569  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
570 
571  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
572  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
573  ConstTensor inputToCellWeights(
574  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
575 
576  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
577  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
578  ConstTensor inputToOutputWeights(
579  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
580 
581  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
582  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
583  ConstTensor recurrentToForgetWeights(TensorInfo(
584  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
585 
586  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
587  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
588  ConstTensor recurrentToCellWeights(TensorInfo(
589  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
590 
591  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
592  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
593  ConstTensor recurrentToOutputWeights(TensorInfo(
594  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
595 
596  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
597  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
598  ConstTensor forgetGateBias(TensorInfo(
599  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
600 
601  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
602  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
603  ConstTensor cellBias(TensorInfo(
604  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
605 
606  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
607  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
608  ConstTensor outputGateBias(TensorInfo(
609  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
610 
611  LstmInputParams params;
612  params.m_InputToForgetWeights = &inputToForgetWeights;
613  params.m_InputToCellWeights = &inputToCellWeights;
614  params.m_InputToOutputWeights = &inputToOutputWeights;
615  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
616  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
617  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
618  params.m_ForgetGateBias = &forgetGateBias;
619  params.m_CellBias = &cellBias;
620  params.m_OutputGateBias = &outputGateBias;
621 
622  TestLstmLayerVisitor visitor(descriptor, params);
623 
624  Network net;
625 
626  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
627  layer->Accept(visitor);
628 }
629 
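// The remaining LSTM test cases repeat the tensor setup above and differ only in which optional
// parameters they exercise: the CifgDisabled cases additionally set InputToInputWeights,
// RecurrentToInputWeights, CellToInputWeights and InputGateBias; the Peephole cases add
// CellToForgetWeights and CellToOutputWeights; the Projection cases add ProjectionWeights and
// ProjectionBias. Each variant also has a Named twin that passes a layer name through to the visitor.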
630 BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
631 {
632  const char* layerName = "LstmLayer";
633  LstmDescriptor descriptor;
634  descriptor.m_ActivationFunc = 3;
635  descriptor.m_ClippingThresProj = 0.5f;
636  descriptor.m_ClippingThresCell = 0.3f;
637  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
638 
639  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
640  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
641  ConstTensor inputToForgetWeights(
642  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
643 
644  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
645  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
646  ConstTensor inputToCellWeights(
647  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
648 
649  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
650  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
651  ConstTensor inputToOutputWeights(
652  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
653 
654  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
655  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
656  ConstTensor recurrentToForgetWeights(TensorInfo(
657  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
658 
659  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
660  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
661  ConstTensor recurrentToCellWeights(TensorInfo(
662  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
663 
664  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
665  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
666  ConstTensor recurrentToOutputWeights(TensorInfo(
667  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
668 
669  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
670  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
671  ConstTensor forgetGateBias(TensorInfo(
672  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
673 
674  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
675  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
676  ConstTensor cellBias(TensorInfo(
677  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
678 
679  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
680  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
681  ConstTensor outputGateBias(TensorInfo(
682  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
683 
684  LstmInputParams params;
685  params.m_InputToForgetWeights = &inputToForgetWeights;
686  params.m_InputToCellWeights = &inputToCellWeights;
687  params.m_InputToOutputWeights = &inputToOutputWeights;
688  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
689  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
690  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
691  params.m_ForgetGateBias = &forgetGateBias;
692  params.m_CellBias = &cellBias;
693  params.m_OutputGateBias = &outputGateBias;
694 
695  TestLstmLayerVisitor visitor(descriptor, params, layerName);
696 
697  Network net;
698 
699  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
700  layer->Accept(visitor);
701 }
702 
703 BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
704 {
705  LstmDescriptor descriptor;
706  descriptor.m_ActivationFunc = 3;
707  descriptor.m_ClippingThresProj = 0.5f;
708  descriptor.m_ClippingThresCell = 0.3f;
709  descriptor.m_CifgEnabled = false; // CIFG is disabled, so the input gate parameters below must be set
710 
711  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
712  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
713  ConstTensor inputToForgetWeights(
714  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
715 
716  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
717  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
718  ConstTensor inputToCellWeights(
719  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
720 
721  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
722  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
723  ConstTensor inputToOutputWeights(
724  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
725 
726  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
727  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
728  ConstTensor recurrentToForgetWeights(TensorInfo(
729  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
730 
731  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
732  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
733  ConstTensor recurrentToCellWeights(TensorInfo(
734  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
735 
736  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
737  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
738  ConstTensor recurrentToOutputWeights(TensorInfo(
739  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
740 
741  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
742  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
743  ConstTensor forgetGateBias(TensorInfo(
744  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
745 
746  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
747  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
748  ConstTensor cellBias(TensorInfo(
749  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
750 
751  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
752  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
753  ConstTensor outputGateBias(TensorInfo(
754  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
755 
756  std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
757  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
758  ConstTensor inputToInputWeights(
759  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
760 
761  std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
762  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
763  ConstTensor recurrentToInputWeights(TensorInfo(
764  4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
765 
766  std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
767  std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
768  ConstTensor cellToInputWeights(
769  TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
770 
771  std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
772  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
773  ConstTensor inputGateBias(
774  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
775 
776  LstmInputParams params;
777  params.m_InputToForgetWeights = &inputToForgetWeights;
778  params.m_InputToCellWeights = &inputToCellWeights;
779  params.m_InputToOutputWeights = &inputToOutputWeights;
780  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
781  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
782  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
783  params.m_ForgetGateBias = &forgetGateBias;
784  params.m_CellBias = &cellBias;
785  params.m_OutputGateBias = &outputGateBias;
786 
787  params.m_InputToInputWeights = &inputToInputWeights;
788  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
789  params.m_CellToInputWeights = &cellToInputWeights;
790  params.m_InputGateBias = &inputGateBias;
791 
792  TestLstmLayerVisitor visitor(descriptor, params);
793 
794  Network net;
795 
796  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
797  layer->Accept(visitor);
798 }
799 
800 BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
801 {
802  const char* layerName = "LstmLayer";
803  LstmDescriptor descriptor;
804  descriptor.m_ActivationFunc = 3;
805  descriptor.m_ClippingThresProj = 0.5f;
806  descriptor.m_ClippingThresCell = 0.3f;
807  descriptor.m_CifgEnabled = false; // CIFG is disabled, so the input gate parameters below must be set
808 
809  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
810  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
811  ConstTensor inputToForgetWeights(
812  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
813 
814  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
815  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
816  ConstTensor inputToCellWeights(
817  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
818 
819  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
820  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
821  ConstTensor inputToOutputWeights(
822  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
823 
824  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
825  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
826  ConstTensor recurrentToForgetWeights(TensorInfo(
827  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
828 
829  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
830  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
831  ConstTensor recurrentToCellWeights(TensorInfo(
832  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
833 
834  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
835  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
836  ConstTensor recurrentToOutputWeights(TensorInfo(
837  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
838 
839  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
840  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
841  ConstTensor forgetGateBias(TensorInfo(
842  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
843 
844  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
845  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
846  ConstTensor cellBias(TensorInfo(
847  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
848 
849  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
850  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
851  ConstTensor outputGateBias(TensorInfo(
852  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
853 
854  std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
855  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
856  ConstTensor inputToInputWeights(
857  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32), inputToInputWeightsData);
858 
859  std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
860  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
861  ConstTensor recurrentToInputWeights(TensorInfo(
862  4, recurrentToInputWeightsDimensions.data(), DataType::Float32), recurrentToInputWeightsData);
863 
864  std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
865  std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
866  ConstTensor cellToInputWeights(
867  TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32), cellToInputWeightsData);
868 
869  std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
870  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
871  ConstTensor inputGateBias(
872  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32), inputGateBiasData);
873 
874  LstmInputParams params;
875  params.m_InputToForgetWeights = &inputToForgetWeights;
876  params.m_InputToCellWeights = &inputToCellWeights;
877  params.m_InputToOutputWeights = &inputToOutputWeights;
878  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
879  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
880  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
881  params.m_ForgetGateBias = &forgetGateBias;
882  params.m_CellBias = &cellBias;
883  params.m_OutputGateBias = &outputGateBias;
884 
885  params.m_InputToInputWeights = &inputToInputWeights;
886  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
887  params.m_CellToInputWeights = &cellToInputWeights;
888  params.m_InputGateBias = &inputGateBias;
889 
890  TestLstmLayerVisitor visitor(descriptor, params, layerName);
891 
892  Network net;
893 
894  IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
895  layer->Accept(visitor);
896 }
897 
898 // TODO add one with peephole
899 BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
900 {
901  LstmDescriptor descriptor;
902  descriptor.m_ActivationFunc = 3;
903  descriptor.m_ClippingThresProj = 0.5f;
904  descriptor.m_ClippingThresCell = 0.3f;
905  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
906  descriptor.m_PeepholeEnabled = true;
907 
908  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
909  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
910  ConstTensor inputToForgetWeights(
911  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
912 
913  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
914  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
915  ConstTensor inputToCellWeights(
916  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
917 
918  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
919  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
920  ConstTensor inputToOutputWeights(
921  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
922 
923  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
924  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
925  ConstTensor recurrentToForgetWeights(TensorInfo(
926  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
927 
928  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
929  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
930  ConstTensor recurrentToCellWeights(TensorInfo(
931  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
932 
933  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
934  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
935  ConstTensor recurrentToOutputWeights(TensorInfo(
936  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
937 
938  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
939  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
940  ConstTensor forgetGateBias(TensorInfo(
941  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
942 
943  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
944  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
945  ConstTensor cellBias(TensorInfo(
946  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
947 
948  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
949  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
950  ConstTensor outputGateBias(TensorInfo(
951  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
952 
953  std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
954  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
955  ConstTensor cellToForgetWeights(
956  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
957 
958  std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
959  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
960  ConstTensor cellToOutputWeights(
961  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
962 
963  LstmInputParams params;
964  params.m_InputToForgetWeights = &inputToForgetWeights;
965  params.m_InputToCellWeights = &inputToCellWeights;
966  params.m_InputToOutputWeights = &inputToOutputWeights;
967  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
968  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
969  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
970  params.m_ForgetGateBias = &forgetGateBias;
971  params.m_CellBias = &cellBias;
972  params.m_OutputGateBias = &outputGateBias;
973 
974  params.m_CellToForgetWeights = &cellToForgetWeights;
975  params.m_CellToOutputWeights = &cellToOutputWeights;
976 
977  TestLstmLayerVisitor visitor(descriptor, params);
978 
979  Network net;
980 
981  IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
982  layer->Accept(visitor);
983 }
984 
985 BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
986 {
987  const char* layerName = "LstmLayer";
988  LstmDescriptor descriptor;
989  descriptor.m_ActivationFunc = 3;
990  descriptor.m_ClippingThresProj = 0.5f;
991  descriptor.m_ClippingThresCell = 0.3f;
992  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
993  descriptor.m_PeepholeEnabled = true;
994 
995  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
996  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
997  ConstTensor inputToForgetWeights(
998  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
999 
1000  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1001  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1002  ConstTensor inputToCellWeights(
1003  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
1004 
1005  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1006  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1007  ConstTensor inputToOutputWeights(
1008  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
1009 
1010  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1011  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1012  ConstTensor recurrentToForgetWeights(TensorInfo(
1013  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
1014 
1015  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1016  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1017  ConstTensor recurrentToCellWeights(TensorInfo(
1018  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
1019 
1020  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1021  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1022  ConstTensor recurrentToOutputWeights(TensorInfo(
1023  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
1024 
1025  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1026  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1027  ConstTensor forgetGateBias(TensorInfo(
1028  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
1029 
1030  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1031  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1032  ConstTensor cellBias(TensorInfo(
1033  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
1034 
1035  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1036  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1037  ConstTensor outputGateBias(TensorInfo(
1038  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
1039 
1040  std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1041  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1042  ConstTensor cellToForgetWeights(
1043  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32), cellToForgetWeightsData);
1044 
1045  std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1046  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1047  ConstTensor cellToOutputWeights(
1048  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32), cellToOutputWeightsData);
1049 
1050  LstmInputParams params;
1051  params.m_InputToForgetWeights = &inputToForgetWeights;
1052  params.m_InputToCellWeights = &inputToCellWeights;
1053  params.m_InputToOutputWeights = &inputToOutputWeights;
1054  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1055  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1056  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1057  params.m_ForgetGateBias = &forgetGateBias;
1058  params.m_CellBias = &cellBias;
1059  params.m_OutputGateBias = &outputGateBias;
1060 
1061  params.m_CellToForgetWeights = &cellToForgetWeights;
1062  params.m_CellToOutputWeights = &cellToOutputWeights;
1063 
1064  TestLstmLayerVisitor visitor(descriptor, params, layerName);
1065 
1066  Network net;
1067 
1068  IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
1069  layer->Accept(visitor);
1070 }
1071 
1072 // TODO add one with projection
1073 BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
1074 {
1075  LstmDescriptor descriptor;
1076  descriptor.m_ActivationFunc = 3;
1077  descriptor.m_ClippingThresProj = 0.5f;
1078  descriptor.m_ClippingThresCell = 0.3f;
1079  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
1080  descriptor.m_ProjectionEnabled = true;
1081 
1082  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1083  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1084  ConstTensor inputToForgetWeights(
1085  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
1086 
1087  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1088  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1089  ConstTensor inputToCellWeights(
1090  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
1091 
1092  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1093  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1094  ConstTensor inputToOutputWeights(
1095  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
1096 
1097  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1098  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1099  ConstTensor recurrentToForgetWeights(TensorInfo(
1100  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
1101 
1102  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1103  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1104  ConstTensor recurrentToCellWeights(TensorInfo(
1105  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
1106 
1107  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1108  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1109  ConstTensor recurrentToOutputWeights(TensorInfo(
1110  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
1111 
1112  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1113  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1114  ConstTensor forgetGateBias(TensorInfo(
1115  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
1116 
1117  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1118  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1119  ConstTensor cellBias(TensorInfo(
1120  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
1121 
1122  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1123  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1124  ConstTensor outputGateBias(TensorInfo(
1125  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
1126 
1127  std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1128  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1129  ConstTensor projectionBias(
1130  TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
1131 
1132  std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1133  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1134  ConstTensor projectionWeights(
1135  TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
1136 
1137  LstmInputParams params;
1138  params.m_InputToForgetWeights = &inputToForgetWeights;
1139  params.m_InputToCellWeights = &inputToCellWeights;
1140  params.m_InputToOutputWeights = &inputToOutputWeights;
1141  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1142  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1143  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1144  params.m_ForgetGateBias = &forgetGateBias;
1145  params.m_CellBias = &cellBias;
1146  params.m_OutputGateBias = &outputGateBias;
1147 
1148  params.m_ProjectionWeights = &projectionWeights;
1149  params.m_ProjectionBias = &projectionBias;
1150 
1151  TestLstmLayerVisitor visitor(descriptor, params);
1152 
1153  Network net;
1154 
1155  IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params);
1156  layer->Accept(visitor);
1157 }
1158 
1159 BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
1160 {
1161  const char* layerName = "LstmLayer";
1162  LstmDescriptor descriptor;
1163  descriptor.m_ActivationFunc = 3;
1164  descriptor.m_ClippingThresProj = 0.5f;
1165  descriptor.m_ClippingThresCell = 0.3f;
1166  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
1167  descriptor.m_ProjectionEnabled = true;
1168 
1169  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1170  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1171  ConstTensor inputToForgetWeights(
1172  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32), inputToForgetWeightsData);
1173 
1174  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1175  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1176  ConstTensor inputToCellWeights(
1177  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32), inputToCellWeightsData);
1178 
1179  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1180  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1181  ConstTensor inputToOutputWeights(
1182  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32), inputToOutputWeightsData);
1183 
1184  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1185  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1186  ConstTensor recurrentToForgetWeights(TensorInfo(
1187  4, recurrentToForgetWeightsDimensions.data(), DataType::Float32), recurrentToForgetWeightsData);
1188 
1189  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1190  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1191  ConstTensor recurrentToCellWeights(TensorInfo(
1192  4, recurrentToCellWeightsDimensions.data(), DataType::Float32), recurrentToCellWeightsData);
1193 
1194  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1195  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1196  ConstTensor recurrentToOutputWeights(TensorInfo(
1197  4, recurrentToOutputWeightsDimensions.data(), DataType::Float32), recurrentToOutputWeightsData);
1198 
1199  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1200  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1201  ConstTensor forgetGateBias(TensorInfo(
1202  4, forgetGateBiasDimensions.data(), DataType::Float32), forgetGateBiasData);
1203 
1204  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1205  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1206  ConstTensor cellBias(TensorInfo(
1207  4, cellBiasDimensions.data(), DataType::Float32), cellBiasData);
1208 
1209  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1210  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1211  ConstTensor outputGateBias(TensorInfo(
1212  4, outputGateBiasDimensions.data(), DataType::Float32), outputGateBiasData);
1213 
1214  std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1215  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1216  ConstTensor projectionBias(
1217  TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32), projectionBiasData);
1218 
1219  std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1220  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1221  ConstTensor projectionWeights(
1222  TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32), projectionWeightsData);
1223 
1224  LstmInputParams params;
1225  params.m_InputToForgetWeights = &inputToForgetWeights;
1226  params.m_InputToCellWeights = &inputToCellWeights;
1227  params.m_InputToOutputWeights = &inputToOutputWeights;
1228  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1229  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1230  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1231  params.m_ForgetGateBias = &forgetGateBias;
1232  params.m_CellBias = &cellBias;
1233  params.m_OutputGateBias = &outputGateBias;
1234 
1235  params.m_ProjectionWeights = &projectionWeights;
1236  params.m_ProjectionBias = &projectionBias;
1237 
1238  TestLstmLayerVisitor visitor(descriptor, params, layerName);
1239 
1240  Network net;
1241 
1242  IConnectableLayer *const layer = net.AddLstmLayer(descriptor, params, layerName);
1243  layer->Accept(visitor);
1244 }
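// The quantized LSTM cases below follow the same visitor pattern, but build a
// QuantizedLstmInputParams (QAsymmU8 weights, Signed32 biases) and call
// Network::AddQuantizedLstmLayer, which takes no descriptor.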
1245 
1246 BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
1247 {
1248  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1249  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1250  ConstTensor inputToInputWeights(
1251  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
1252 
1253  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1254  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1255  ConstTensor inputToForgetWeights(
1256  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
1257 
1258  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1259  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1260  ConstTensor inputToCellWeights(
1261  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
1262 
1263  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1264  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1265  ConstTensor inputToOutputWeights(
1266  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
1267 
1268 
1269  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1270  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1271  ConstTensor recurrentToInputWeights(TensorInfo(
1272  4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
1273 
1274  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1275  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1276  ConstTensor recurrentToForgetWeights(TensorInfo(
1277  4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
1278 
1279  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1280  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1281  ConstTensor recurrentToCellWeights(TensorInfo(
1282  4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
1283 
1284  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1285  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1286  ConstTensor recurrentToOutputWeights(TensorInfo(
1287  4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
1288 
1289 
1290  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1291  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1292  ConstTensor inputGateBias(
1293  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
1294 
1295  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1296  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1297  ConstTensor forgetGateBias(TensorInfo(
1298  4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
1299 
1300  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1301  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1302  ConstTensor cellBias(TensorInfo(
1303  4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
1304 
1305  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1306  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1307  ConstTensor outputGateBias(TensorInfo(
1308  4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
1309 
1310  QuantizedLstmInputParams params;
1311 
1312  params.m_InputToInputWeights = &inputToInputWeights;
1313  params.m_InputToForgetWeights = &inputToForgetWeights;
1314  params.m_InputToCellWeights = &inputToCellWeights;
1315  params.m_InputToOutputWeights = &inputToOutputWeights;
1316 
1317  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1318  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1319  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1320  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1321 
1322  params.m_InputGateBias = &inputGateBias;
1323  params.m_ForgetGateBias = &forgetGateBias;
1324  params.m_CellBias = &cellBias;
1325  params.m_OutputGateBias = &outputGateBias;
1326 
1327  TestQuantizedLstmLayerVisitor visitor(params);
1328 
1329  Network net;
1330 
1331  IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
1332  layer->Accept(visitor);
1333 }
1334 
1335 BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
1336 {
1337  const char* layerName = "LstmLayer";
1338  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1339  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1340  ConstTensor inputToInputWeights(
1341  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
1342 
1343  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1344  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1345  ConstTensor inputToForgetWeights(
1346  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
1347 
1348  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1349  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1350  ConstTensor inputToCellWeights(
1351  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
1352 
1353  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1354  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1355  ConstTensor inputToOutputWeights(
1356  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
1357 
1358 
1359  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1360  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1361  ConstTensor recurrentToInputWeights(TensorInfo(
1362  4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
1363 
1364  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1365  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1366  ConstTensor recurrentToForgetWeights(TensorInfo(
1367  4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
1368 
1369  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1370  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1371  ConstTensor recurrentToCellWeights(TensorInfo(
1372  4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
1373 
1374  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1375  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1376  ConstTensor recurrentToOutputWeights(TensorInfo(
1377  4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
1378 
1379 
1380  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1381  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1382  ConstTensor inputGateBias(
1383  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32), inputGateBiasData);
1384 
1385  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1386  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1387  ConstTensor forgetGateBias(TensorInfo(
1388  4, forgetGateBiasDimensions.data(), DataType::Signed32), forgetGateBiasData);
1389 
1390  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1391  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1392  ConstTensor cellBias(TensorInfo(
1393  4, cellBiasDimensions.data(), DataType::Signed32), cellBiasData);
1394 
1395  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1396  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1397  ConstTensor outputGateBias(TensorInfo(
1398  4, outputGateBiasDimensions.data(), DataType::Signed32), outputGateBiasData);
1399 
1400  QuantizedLstmInputParams params;
1401 
1402  params.m_InputToInputWeights = &inputToInputWeights;
1403  params.m_InputToForgetWeights = &inputToForgetWeights;
1404  params.m_InputToCellWeights = &inputToCellWeights;
1405  params.m_InputToOutputWeights = &inputToOutputWeights;
1406 
1407  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1408  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1409  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1410  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1411 
1412  params.m_InputGateBias = &inputGateBias;
1413  params.m_ForgetGateBias = &forgetGateBias;
1414  params.m_CellBias = &cellBias;
1415  params.m_OutputGateBias = &outputGateBias;
1416 
1417  TestQuantizedLstmLayerVisitor visitor(params, layerName);
1418 
1419  Network net;
1420 
1421  IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
1422  layer->Accept(visitor);
1423 }
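// A hypothetical helper, not part of this file, sketching how the repeated 1x1x3x3 Float32
// tensor setup in the cases above could be factored out. ConstTensor keeps a pointer to the
// caller's backing store, so the data vector must outlive the returned tensor.
namespace
{
ConstTensor MakeFloat32Tensor3x3(const std::vector<float>& data)
{
    const unsigned int dimensions[] = {1, 1, 3, 3};
    // TensorInfo copies the dimensions; ConstTensor only references the data.
    return ConstTensor(TensorInfo(4, dimensions, DataType::Float32), data);
}
} // anonymous namespace
// Example usage: ConstTensor cellBias = MakeFloat32Tensor3x3(cellBiasData);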
1424 
1425 BOOST_AUTO_TEST_SUITE_END()
1426 
1427 } // namespace armnn