ArmNN 22.02
ConstTensorLayerVisitor.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "ConstTensorLayerVisitor.hpp"
7 #include "Network.hpp"
8 
9 #include <doctest/doctest.h>
10 
11 namespace armnn
12 {
13 
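// Each visitor override below re-checks the values captured when the visitor was
// constructed: CheckDescriptor compares every descriptor field, and CheckInputParameters
// compares the constant-tensor parameters, so a test fails if the visited layer differs
// from what the test originally added to the network.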
14 void TestConvolution2dLayerVisitor::CheckDescriptor(const Convolution2dDescriptor& convolution2dDescriptor)
15 {
16  CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
17  CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
18  CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
19  CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
20  CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
21  CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
22  CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
23  CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
24 }
25 
26 void TestDepthwiseConvolution2dLayerVisitor::CheckDescriptor(
27  const DepthwiseConvolution2dDescriptor& convolution2dDescriptor)
28 {
29  CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
30  CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
31  CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
32  CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
33  CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
34  CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
35  CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
36  CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
37 }
38 
39 void TestFullyConnectedLayerVistor::CheckDescriptor(const FullyConnectedDescriptor& descriptor)
40 {
41  CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
42  CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
43 }
44 
45 void TestBatchNormalizationLayerVisitor::CheckDescriptor(const BatchNormalizationDescriptor& descriptor)
46 {
47  CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
48  CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
49 }
50 
51 void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
52 {
53  CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
54  CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
55  CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
56  CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
57  CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
58  CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
59 }
60 
61 void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
62 {
63  CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
64  CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
65  CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
66  CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
67  CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
68 }
69 
70 void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInputParams& inputParams)
71 {
72  CheckConstTensorPtrs("InputToInputWeights",
73  m_InputParams.m_InputToInputWeights,
74  inputParams.m_InputToInputWeights);
75 
76  CheckConstTensorPtrs("InputToForgetWeights",
77  m_InputParams.m_InputToForgetWeights,
78  inputParams.m_InputToForgetWeights);
79 
80  CheckConstTensorPtrs("InputToCellWeights",
81  m_InputParams.m_InputToCellWeights,
82  inputParams.m_InputToCellWeights);
83 
84  CheckConstTensorPtrs("InputToOutputWeights",
85  m_InputParams.m_InputToOutputWeights,
86  inputParams.m_InputToOutputWeights);
87 
88  CheckConstTensorPtrs("RecurrentToInputWeights",
89  m_InputParams.m_RecurrentToInputWeights,
90  inputParams.m_RecurrentToInputWeights);
91 
92  CheckConstTensorPtrs("RecurrentToForgetWeights",
93  m_InputParams.m_RecurrentToForgetWeights,
94  inputParams.m_RecurrentToForgetWeights);
95 
96  CheckConstTensorPtrs("RecurrentToCellWeights",
97  m_InputParams.m_RecurrentToCellWeights,
98  inputParams.m_RecurrentToCellWeights);
99 
100  CheckConstTensorPtrs("RecurrentToOutputWeights",
101  m_InputParams.m_RecurrentToOutputWeights,
102  inputParams.m_RecurrentToOutputWeights);
103 
104  CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
105  CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
106  CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
107  CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
108 }
109 
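// Test pattern: build a descriptor and constant tensors, add the layer to a NetworkImpl,
// then replay the layer with ExecuteStrategy() against a test visitor that verifies the
// descriptor, the tensors and, in the "Named" variants, the layer name.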
110 TEST_SUITE("TestConstTensorLayerVisitor")
111 {
112 TEST_CASE("CheckConvolution2dLayer")
113 {
114  Convolution2dDescriptor descriptor;
115  descriptor.m_PadLeft = 2;
116  descriptor.m_PadRight = 3;
117  descriptor.m_PadBottom = 1;
118  descriptor.m_PadTop = 5;
119  descriptor.m_StrideX = 2;
120  descriptor.m_StrideY = 3;
121  descriptor.m_DataLayout = DataLayout::NHWC;
122 
123  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
124  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
125  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
126 
127  TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
128 
129  NetworkImpl net;
130 
131  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
132  layer->ExecuteStrategy(visitor);
133 }
134 
135 TEST_CASE("CheckNamedConvolution2dLayer")
136 {
137  const char* layerName = "Convolution2dLayer";
138  Convolution2dDescriptor descriptor;
139  descriptor.m_PadLeft = 2;
140  descriptor.m_PadRight = 3;
141  descriptor.m_PadBottom = 1;
142  descriptor.m_PadTop = 5;
143  descriptor.m_StrideX = 2;
144  descriptor.m_StrideY = 3;
145  descriptor.m_DataLayout = DataLayout::NHWC;
146 
147  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
148  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
149  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
150 
151  TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
152 
153  NetworkImpl net;
154 
155  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
156  layer->ExecuteStrategy(visitor);
157 }
158 
159 TEST_CASE("CheckConvolution2dLayerWithBiases")
160 {
161  Convolution2dDescriptor descriptor;
162  descriptor.m_PadLeft = 2;
163  descriptor.m_PadRight = 3;
164  descriptor.m_PadBottom = 1;
165  descriptor.m_PadTop = 5;
166  descriptor.m_StrideX = 2;
167  descriptor.m_StrideY = 3;
168  descriptor.m_DataLayout = DataLayout::NHWC;
169  descriptor.m_BiasEnabled = true;
170 
171  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
172  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
173  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
174 
175  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
176  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
177  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
178  Optional<ConstTensor> optionalBiases(biases);
179 
180  TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
181 
182  NetworkImpl net;
183 
184  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
185  layer->ExecuteStrategy(visitor);
186 }
187 
188 TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
189 {
190  const char* layerName = "Convolution2dLayer";
191  Convolution2dDescriptor descriptor;
192  descriptor.m_PadLeft = 2;
193  descriptor.m_PadRight = 3;
194  descriptor.m_PadBottom = 1;
195  descriptor.m_PadTop = 5;
196  descriptor.m_StrideX = 2;
197  descriptor.m_StrideY = 3;
198  descriptor.m_DataLayout = DataLayout::NHWC;
199  descriptor.m_BiasEnabled = true;
200 
201  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
202  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
203  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
204 
205  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
206  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
207  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
208  Optional<ConstTensor> optionalBiases(biases);
209 
210  TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
211 
212  NetworkImpl net;
213 
214  IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
215  layer->ExecuteStrategy(visitor);
216 }
217 
218 TEST_CASE("CheckDepthwiseConvolution2dLayer")
219 {
220  DepthwiseConvolution2dDescriptor descriptor;
221  descriptor.m_PadLeft = 2;
222  descriptor.m_PadRight = 3;
223  descriptor.m_PadBottom = 1;
224  descriptor.m_PadTop = 5;
225  descriptor.m_StrideX = 2;
226  descriptor.m_StrideY = 3;
227  descriptor.m_DataLayout = DataLayout::NHWC;
228 
229  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
230  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
231  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
232 
233  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
234 
235  NetworkImpl net;
236 
237  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
238  layer->ExecuteStrategy(visitor);
239 }
240 
241 TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
242 {
243  const char* layerName = "DepthwiseConvolution2dLayer";
244  DepthwiseConvolution2dDescriptor descriptor;
245  descriptor.m_PadLeft = 2;
246  descriptor.m_PadRight = 3;
247  descriptor.m_PadBottom = 1;
248  descriptor.m_PadTop = 5;
249  descriptor.m_StrideX = 2;
250  descriptor.m_StrideY = 3;
251  descriptor.m_DataLayout = DataLayout::NHWC;
252 
253  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
254  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
255  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
256 
257  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
258 
259  NetworkImpl net;
260 
261  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor,
262  weights,
263  EmptyOptional(),
264  layerName);
265  layer->ExecuteStrategy(visitor);
266 }
267 
268 TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
269 {
270  DepthwiseConvolution2dDescriptor descriptor;
271  descriptor.m_PadLeft = 2;
272  descriptor.m_PadRight = 3;
273  descriptor.m_PadBottom = 1;
274  descriptor.m_PadTop = 5;
275  descriptor.m_StrideX = 2;
276  descriptor.m_StrideY = 3;
277  descriptor.m_DataLayout = DataLayout::NHWC;
278  descriptor.m_BiasEnabled = true;
279 
280  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
281  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
282  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
283 
284  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
285  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
286  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
287  Optional<ConstTensor> optionalBiases(biases);
288 
289  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
290 
291  NetworkImpl net;
292 
293  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
294  layer->ExecuteStrategy(visitor);
295 }
296 
297 TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
298 {
299  const char* layerName = "DepthwiseConvolution2dLayer";
300  DepthwiseConvolution2dDescriptor descriptor;
301  descriptor.m_PadLeft = 2;
302  descriptor.m_PadRight = 3;
303  descriptor.m_PadBottom = 1;
304  descriptor.m_PadTop = 5;
305  descriptor.m_StrideX = 2;
306  descriptor.m_StrideY = 3;
307  descriptor.m_DataLayout = DataLayout::NHWC;
308  descriptor.m_BiasEnabled = true;
309 
310  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
311  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
312  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
313 
314  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
315  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
316  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
317  Optional<ConstTensor> optionalBiases(biases);
318 
319  TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
320 
321  NetworkImpl net;
322 
323  IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
324  layer->ExecuteStrategy(visitor);
325 }
326 
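// FullyConnected tests: weights and biases are added as separate ConstantLayers and
// connected to input slots 1 and 2 of the FullyConnected layer, so each constant is
// verified with its own TestConstantLayerVisitor.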
327 TEST_CASE("CheckFullyConnectedLayer")
328 {
329  FullyConnectedDescriptor descriptor;
330  descriptor.m_TransposeWeightMatrix = true;
331  descriptor.m_ConstantWeights = true;
332  descriptor.m_BiasEnabled = false;
333 
334  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
335  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
336  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
337 
338  TestConstantLayerVisitor weightsVisitor(weights);
339  TestFullyConnectedLayerVistor visitor(descriptor);
340 
341  NetworkImpl net;
342 
343  IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
344  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
345  weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
346 
347  weightsLayer->ExecuteStrategy(weightsVisitor);
348  layer->ExecuteStrategy(visitor);
349 }
350 
351 TEST_CASE("CheckNamedFullyConnectedLayer")
352 {
353  const char* layerName = "FullyConnectedLayer";
354  FullyConnectedDescriptor descriptor;
355  descriptor.m_TransposeWeightMatrix = true;
356  descriptor.m_ConstantWeights = true;
357  descriptor.m_BiasEnabled = false;
358 
359  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
360  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
361  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
362 
363  TestConstantLayerVisitor weightsVisitor(weights);
364  TestFullyConnectedLayerVistor visitor(descriptor, layerName);
365 
366  NetworkImpl net;
367 
368  IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
369  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
370  weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
371 
372  weightsLayer->ExecuteStrategy(weightsVisitor);
373  layer->ExecuteStrategy(visitor);
374 }
375 
376 TEST_CASE("CheckFullyConnectedLayerWithBiases")
377 {
378  FullyConnectedDescriptor descriptor;
379  descriptor.m_TransposeWeightMatrix = true;
380  descriptor.m_ConstantWeights = true;
381  descriptor.m_BiasEnabled = true;
382 
383  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
384  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
385  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
386 
387  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
388  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
389  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
390 
391  TestConstantLayerVisitor weightsVisitor(weights);
392  TestConstantLayerVisitor biasesVisitor(biases);
393  TestFullyConnectedLayerVistor visitor(descriptor);
394 
395  NetworkImpl net;
396 
397  IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
398  IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
399  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
400  weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
401  biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
402 
403  weightsLayer->ExecuteStrategy(weightsVisitor);
404  biasesLayer->ExecuteStrategy(biasesVisitor);
405  layer->ExecuteStrategy(visitor);
406 }
407 
408 TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
409 {
410  const char* layerName = "FullyConnectedLayer";
411  FullyConnectedDescriptor descriptor;
412  descriptor.m_TransposeWeightMatrix = true;
413  descriptor.m_ConstantWeights = true;
414  descriptor.m_BiasEnabled = true;
415 
416  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
417  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
418  ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
419 
420  std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
421  std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
422  ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);
423 
424  TestConstantLayerVisitor weightsVisitor(weights);
425  TestConstantLayerVisitor biasesVisitor(biases);
426  TestFullyConnectedLayerVistor visitor(descriptor, layerName);
427 
428  NetworkImpl net;
429 
430  IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
431  IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
432  IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
433  weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
434  biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));
435 
436  weightsLayer->ExecuteStrategy(weightsVisitor);
437  biasesLayer->ExecuteStrategy(biasesVisitor);
438  layer->ExecuteStrategy(visitor);
439 }
440 
441 TEST_CASE("CheckBatchNormalizationLayer")
442 {
443  BatchNormalizationDescriptor descriptor;
444  descriptor.m_Eps = 0.0002f;
445  descriptor.m_DataLayout = DataLayout::NHWC;
446 
447  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
448  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
449  ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
450 
451  std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
452  std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
453  ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32, 0.0f, 0, true), varianceData);
454 
455  std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
456  std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
457  ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32, 0.0f, 0, true), betaData);
458 
459  std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
460  std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
461  ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32, 0.0f, 0, true), gammaData);
462 
463  TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
464 
465  NetworkImpl net;
466 
467  IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
468  layer->ExecuteStrategy(visitor);
469 }
470 
471 TEST_CASE("CheckNamedBatchNormalizationLayer")
472 {
473  const char* layerName = "BatchNormalizationLayer";
474  BatchNormalizationDescriptor descriptor;
475  descriptor.m_Eps = 0.0002f;
476  descriptor.m_DataLayout = DataLayout::NHWC;
477 
478  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
479  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
480  ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
481 
482  std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
483  std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
484  ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32, 0.0f, 0, true), varianceData);
485 
486  std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
487  std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
488  ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32, 0.0f, 0, true), betaData);
489 
490  std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
491  std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
492  ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32, 0.0f, 0, true), gammaData);
493 
494  TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
495 
496  NetworkImpl net;
497 
498  IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
499  descriptor, mean, variance, beta, gamma, layerName);
500  layer->ExecuteStrategy(visitor);
501 }
502 
503 TEST_CASE("CheckConstLayer")
504 {
505  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
506  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
507  ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
508 
509  TestConstantLayerVisitor visitor(input);
510 
511  NetworkImpl net;
512 
513  IConnectableLayer* const layer = net.AddConstantLayer(input);
514  layer->ExecuteStrategy(visitor);
515 }
516 
517 TEST_CASE("CheckNamedConstLayer")
518 {
519  const char* layerName = "ConstantLayer";
520  std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
521  std::vector<unsigned int> dimensions = {1, 1, 3, 3};
522  ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);
523 
524  TestConstantLayerVisitor visitor(input, layerName);
525 
526  NetworkImpl net;
527 
528  IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
529  layer->ExecuteStrategy(visitor);
530 }
531 
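// LSTM tests: the basic cases set only the mandatory weights and biases; the later
// cases additionally provide the CIFG, peephole or projection parameter groups that
// the corresponding descriptor flags enable.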
532 TEST_CASE("CheckLstmLayerBasic")
533 {
534  LstmDescriptor descriptor;
535  descriptor.m_ActivationFunc = 3;
536  descriptor.m_ClippingThresProj = 0.5f;
537  descriptor.m_ClippingThresCell = 0.3f;
538  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
539 
540  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
541  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
542  ConstTensor inputToForgetWeights(
543  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
544  inputToForgetWeightsData);
545 
546  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
547  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
548  ConstTensor inputToCellWeights(
549  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
550  inputToCellWeightsData);
551 
552  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
553  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
554  ConstTensor inputToOutputWeights(
555  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
556  inputToOutputWeightsData);
557 
558  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
559  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
560  ConstTensor recurrentToForgetWeights(
561  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
562  recurrentToForgetWeightsData);
563 
564  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
565  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
566  ConstTensor recurrentToCellWeights(
567  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
568  recurrentToCellWeightsData);
569 
570  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
571  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
572  ConstTensor recurrentToOutputWeights(
573  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
574  recurrentToOutputWeightsData);
575 
576  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
577  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
578  ConstTensor forgetGateBias(
579  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
580  forgetGateBiasData);
581 
582  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
583  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
584  ConstTensor cellBias(
585  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
586  cellBiasData);
587 
588  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
589  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
590  ConstTensor outputGateBias(
591  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
592  outputGateBiasData);
593 
594  LstmInputParams params;
595  params.m_InputToForgetWeights = &inputToForgetWeights;
596  params.m_InputToCellWeights = &inputToCellWeights;
597  params.m_InputToOutputWeights = &inputToOutputWeights;
598  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
599  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
600  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
601  params.m_ForgetGateBias = &forgetGateBias;
602  params.m_CellBias = &cellBias;
603  params.m_OutputGateBias = &outputGateBias;
604 
605  TestLstmLayerVisitor visitor(descriptor, params);
606 
607  NetworkImpl net;
608 
609  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
610  layer->ExecuteStrategy(visitor);
611 }
612 
613 TEST_CASE("CheckNamedLstmLayerBasic")
614 {
615  const char* layerName = "LstmLayer";
616  LstmDescriptor descriptor;
617  descriptor.m_ActivationFunc = 3;
618  descriptor.m_ClippingThresProj = 0.5f;
619  descriptor.m_ClippingThresCell = 0.3f;
620  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
621 
622  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
623  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
624  ConstTensor inputToForgetWeights(
625  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
626  inputToForgetWeightsData);
627 
628  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
629  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
630  ConstTensor inputToCellWeights(
631  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
632  inputToCellWeightsData);
633 
634  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
635  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
636  ConstTensor inputToOutputWeights(
637  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
638  inputToOutputWeightsData);
639 
640  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
641  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
642  ConstTensor recurrentToForgetWeights(
643  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
644  recurrentToForgetWeightsData);
645 
646  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
647  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
648  ConstTensor recurrentToCellWeights(
649  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
650  recurrentToCellWeightsData);
651 
652  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
653  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
654  ConstTensor recurrentToOutputWeights(
655  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
656  recurrentToOutputWeightsData);
657 
658  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
659  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
660  ConstTensor forgetGateBias(
661  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
662  forgetGateBiasData);
663 
664  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
665  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
666  ConstTensor cellBias(
667  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
668  cellBiasData);
669 
670  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
671  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
672  ConstTensor outputGateBias(
673  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
674  outputGateBiasData);
675 
676  LstmInputParams params;
677  params.m_InputToForgetWeights = &inputToForgetWeights;
678  params.m_InputToCellWeights = &inputToCellWeights;
679  params.m_InputToOutputWeights = &inputToOutputWeights;
680  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
681  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
682  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
683  params.m_ForgetGateBias = &forgetGateBias;
684  params.m_CellBias = &cellBias;
685  params.m_OutputGateBias = &outputGateBias;
686 
687  TestLstmLayerVisitor visitor(descriptor, params, layerName);
688 
689  NetworkImpl net;
690 
691  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
692  layer->ExecuteStrategy(visitor);
693 }
694 
695 TEST_CASE("CheckLstmLayerCifgDisabled")
696 {
697  LstmDescriptor descriptor;
698  descriptor.m_ActivationFunc = 3;
699  descriptor.m_ClippingThresProj = 0.5f;
700  descriptor.m_ClippingThresCell = 0.3f;
701  descriptor.m_CifgEnabled = false; // CIFG is disabled here, so the CIFG parameters (input gate weights and bias) must be provided
702 
703  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
704  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
705  ConstTensor inputToForgetWeights(
706  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
707  inputToForgetWeightsData);
708 
709  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
710  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
711  ConstTensor inputToCellWeights(
712  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
713  inputToCellWeightsData);
714 
715  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
716  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
717  ConstTensor inputToOutputWeights(
718  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
719  inputToOutputWeightsData);
720 
721  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
722  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
723  ConstTensor recurrentToForgetWeights(
724  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
725  recurrentToForgetWeightsData);
726 
727  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
728  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
729  ConstTensor recurrentToCellWeights(
730  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
731  recurrentToCellWeightsData);
732 
733  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
734  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
735  ConstTensor recurrentToOutputWeights(
736  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
737  recurrentToOutputWeightsData);
738 
739  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
740  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
741  ConstTensor forgetGateBias(
742  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
743  forgetGateBiasData);
744 
745  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
746  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
747  ConstTensor cellBias(
748  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
749  cellBiasData);
750 
751  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
752  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
753  ConstTensor outputGateBias(
754  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
755  outputGateBiasData);
756 
757  std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
758  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
759  ConstTensor inputToInputWeights(
760  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
761  inputToInputWeightsData);
762 
763  std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
764  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
765  ConstTensor recurrentToInputWeights(
766  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
767  recurrentToInputWeightsData);
768 
769  std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
770  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
771  ConstTensor inputGateBias(
772  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
773  inputGateBiasData);
774 
775  LstmInputParams params;
776  params.m_InputToForgetWeights = &inputToForgetWeights;
777  params.m_InputToCellWeights = &inputToCellWeights;
778  params.m_InputToOutputWeights = &inputToOutputWeights;
779  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
780  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
781  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
782  params.m_ForgetGateBias = &forgetGateBias;
783  params.m_CellBias = &cellBias;
784  params.m_OutputGateBias = &outputGateBias;
785 
786  params.m_InputToInputWeights = &inputToInputWeights;
787  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
788  params.m_InputGateBias = &inputGateBias;
789 
790  TestLstmLayerVisitor visitor(descriptor, params);
791 
792  NetworkImpl net;
793 
794  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
795  layer->ExecuteStrategy(visitor);
796 }
797 
798 TEST_CASE("CheckNamedLstmLayerCifgDisabled")
799 {
800  const char* layerName = "LstmLayer";
801  LstmDescriptor descriptor;
802  descriptor.m_ActivationFunc = 3;
803  descriptor.m_ClippingThresProj = 0.5f;
804  descriptor.m_ClippingThresCell = 0.3f;
805  descriptor.m_CifgEnabled = false; // CIFG is disabled here, so the CIFG parameters (input gate weights and bias) must be provided
806 
807  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
808  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
809  ConstTensor inputToForgetWeights(
810  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
811  inputToForgetWeightsData);
812 
813  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
814  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
815  ConstTensor inputToCellWeights(
816  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
817  inputToCellWeightsData);
818 
819  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
820  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
821  ConstTensor inputToOutputWeights(
822  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
823  inputToOutputWeightsData);
824 
825  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
826  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
827  ConstTensor recurrentToForgetWeights(
828  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
829  recurrentToForgetWeightsData);
830 
831  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
832  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
833  ConstTensor recurrentToCellWeights(
834  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
835  recurrentToCellWeightsData);
836 
837  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
838  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
839  ConstTensor recurrentToOutputWeights(
840  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
841  recurrentToOutputWeightsData);
842 
843  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
844  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
845  ConstTensor forgetGateBias(
846  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
847  forgetGateBiasData);
848 
849  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
850  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
851  ConstTensor cellBias(
852  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
853  cellBiasData);
854 
855  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
856  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
857  ConstTensor outputGateBias(
858  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
859  outputGateBiasData);
860 
861  std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
862  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
863  ConstTensor inputToInputWeights(
864  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
865  inputToInputWeightsData);
866 
867  std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
868  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
869  ConstTensor recurrentToInputWeights(
870  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
871  recurrentToInputWeightsData);
872 
873  std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
874  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
875  ConstTensor inputGateBias(
876  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
877  inputGateBiasData);
878 
879  LstmInputParams params;
880  params.m_InputToForgetWeights = &inputToForgetWeights;
881  params.m_InputToCellWeights = &inputToCellWeights;
882  params.m_InputToOutputWeights = &inputToOutputWeights;
883  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
884  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
885  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
886  params.m_ForgetGateBias = &forgetGateBias;
887  params.m_CellBias = &cellBias;
888  params.m_OutputGateBias = &outputGateBias;
889 
890  params.m_InputToInputWeights = &inputToInputWeights;
891  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
892  params.m_InputGateBias = &inputGateBias;
893 
894  TestLstmLayerVisitor visitor(descriptor, params, layerName);
895 
896  NetworkImpl net;
897 
898  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
899  layer->ExecuteStrategy(visitor);
900 }
901 
902 // TODO add one with peephole
903 TEST_CASE("CheckLstmLayerPeephole")
904 {
905  LstmDescriptor descriptor;
906  descriptor.m_ActivationFunc = 3;
907  descriptor.m_ClippingThresProj = 0.5f;
908  descriptor.m_ClippingThresCell = 0.3f;
909  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
910  descriptor.m_PeepholeEnabled = true;
911 
912  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
913  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
914  ConstTensor inputToForgetWeights(
915  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
916  inputToForgetWeightsData);
917 
918  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
919  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
920  ConstTensor inputToCellWeights(
921  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
922  inputToCellWeightsData);
923 
924  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
925  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
926  ConstTensor inputToOutputWeights(
927  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
928  inputToOutputWeightsData);
929 
930  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
931  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
932  ConstTensor recurrentToForgetWeights(
933  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
934  recurrentToForgetWeightsData);
935 
936  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
937  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
938  ConstTensor recurrentToCellWeights(
939  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
940  recurrentToCellWeightsData);
941 
942  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
943  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
944  ConstTensor recurrentToOutputWeights(
945  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
946  recurrentToOutputWeightsData);
947 
948  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
949  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
950  ConstTensor forgetGateBias(
951  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
952  forgetGateBiasData);
953 
954  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
955  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
956  ConstTensor cellBias(
957  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
958  cellBiasData);
959 
960  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
961  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
962  ConstTensor outputGateBias(
963  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
964  outputGateBiasData);
965 
966  std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
967  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
968  ConstTensor cellToForgetWeights(
969  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
970  cellToForgetWeightsData);
971 
972  std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
973  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
974  ConstTensor cellToOutputWeights(
975  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
976  cellToOutputWeightsData);
977 
978  LstmInputParams params;
979  params.m_InputToForgetWeights = &inputToForgetWeights;
980  params.m_InputToCellWeights = &inputToCellWeights;
981  params.m_InputToOutputWeights = &inputToOutputWeights;
982  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
983  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
984  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
985  params.m_ForgetGateBias = &forgetGateBias;
986  params.m_CellBias = &cellBias;
987  params.m_OutputGateBias = &outputGateBias;
988 
989  params.m_CellToForgetWeights = &cellToForgetWeights;
990  params.m_CellToOutputWeights = &cellToOutputWeights;
991 
992  TestLstmLayerVisitor visitor(descriptor, params);
993 
994  NetworkImpl net;
995 
996  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
997  layer->ExecuteStrategy(visitor);
998 }
999 
1000 TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
1001 {
1002  LstmDescriptor descriptor;
1003  descriptor.m_ActivationFunc = 3;
1004  descriptor.m_ClippingThresProj = 0.5f;
1005  descriptor.m_ClippingThresCell = 0.3f;
1006  descriptor.m_CifgEnabled = false;
1007  descriptor.m_PeepholeEnabled = true;
1008 
1009  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1010  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1011  ConstTensor inputToForgetWeights(
1012  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1013  inputToForgetWeightsData);
1014 
1015  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1016  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1017  ConstTensor inputToCellWeights(
1018  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1019  inputToCellWeightsData);
1020 
1021  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1022  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1023  ConstTensor inputToOutputWeights(
1024  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1025  inputToOutputWeightsData);
1026 
1027  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1028  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1029  ConstTensor recurrentToForgetWeights(
1030  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1031  recurrentToForgetWeightsData);
1032 
1033  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1034  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1035  ConstTensor recurrentToCellWeights(
1036  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1037  recurrentToCellWeightsData);
1038 
1039  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1040  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1041  ConstTensor recurrentToOutputWeights(
1042  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1043  recurrentToOutputWeightsData);
1044 
1045  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1046  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1047  ConstTensor forgetGateBias(
1048  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1049  forgetGateBiasData);
1050 
1051  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1052  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1053  ConstTensor cellBias(
1054  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1055  cellBiasData);
1056 
1057  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1058  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1059  ConstTensor outputGateBias(
1060  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1061  outputGateBiasData);
1062 
1063  std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1064  std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
1065  ConstTensor cellToInputWeights(
1066  TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1067  cellToInputWeightsData);
1068 
1069  std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1070  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1071  ConstTensor cellToForgetWeights(
1072  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1073  cellToForgetWeightsData);
1074 
1075  std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1076  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1077  ConstTensor cellToOutputWeights(
1078  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1079  cellToOutputWeightsData);
1080 
1081  std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1082  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1083  ConstTensor inputToInputWeights(
1084  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1085  inputToInputWeightsData);
1086 
1087  std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1088  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1089  ConstTensor recurrentToInputWeights(
1090  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1091  recurrentToInputWeightsData);
1092 
1093  std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1094  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1095  ConstTensor inputGateBias(
1096  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1097  inputGateBiasData);
1098 
1099  LstmInputParams params;
1100  // Basic params
1101  params.m_InputToForgetWeights = &inputToForgetWeights;
1102  params.m_InputToCellWeights = &inputToCellWeights;
1103  params.m_InputToOutputWeights = &inputToOutputWeights;
1104  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1105  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1106  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1107  params.m_ForgetGateBias = &forgetGateBias;
1108  params.m_CellBias = &cellBias;
1109  params.m_OutputGateBias = &outputGateBias;
1110 
1111  // Peephole params
1112  params.m_CellToInputWeights = &cellToInputWeights;
1113  params.m_CellToForgetWeights = &cellToForgetWeights;
1114  params.m_CellToOutputWeights = &cellToOutputWeights;
1115 
1116  // Cifg params
1117  params.m_InputToInputWeights = &inputToInputWeights;
1118  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1119  params.m_InputGateBias = &inputGateBias;
1120 
1121  TestLstmLayerVisitor visitor(descriptor, params);
1122 
1123  NetworkImpl net;
1124 
1125  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
1126  layer->ExecuteStrategy(visitor);
1127 }
1128 
1129 TEST_CASE("CheckNamedLstmLayerPeephole")
1130 {
1131  const char* layerName = "LstmLayer";
1132  LstmDescriptor descriptor;
1133  descriptor.m_ActivationFunc = 3;
1134  descriptor.m_ClippingThresProj = 0.5f;
1135  descriptor.m_ClippingThresCell = 0.3f;
1136  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
1137  descriptor.m_PeepholeEnabled = true;
1138 
1139  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1140  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1141  ConstTensor inputToForgetWeights(
1142  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1143  inputToForgetWeightsData);
1144 
1145  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1146  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1147  ConstTensor inputToCellWeights(
1148  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1149  inputToCellWeightsData);
1150 
1151  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1152  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1153  ConstTensor inputToOutputWeights(
1154  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1155  inputToOutputWeightsData);
1156 
1157  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1158  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1159  ConstTensor recurrentToForgetWeights(
1160  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1161  recurrentToForgetWeightsData);
1162 
1163  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1164  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1165  ConstTensor recurrentToCellWeights(
1166  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1167  recurrentToCellWeightsData);
1168 
1169  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1170  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1171  ConstTensor recurrentToOutputWeights(
1172  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1173  recurrentToOutputWeightsData);
1174 
1175  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1176  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1177  ConstTensor forgetGateBias(
1178  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1179  forgetGateBiasData);
1180 
1181  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1182  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1183  ConstTensor cellBias(
1184  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1185  cellBiasData);
1186 
1187  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1188  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1189  ConstTensor outputGateBias(
1190  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1191  outputGateBiasData);
1192 
1193  std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1194  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1195  ConstTensor cellToForgetWeights(
1196  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1197  cellToForgetWeightsData);
1198 
1199  std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1200  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1201  ConstTensor cellToOutputWeights(
1202  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1203  cellToOutputWeightsData);
1204 
1205  LstmInputParams params;
1206  params.m_InputToForgetWeights = &inputToForgetWeights;
1207  params.m_InputToCellWeights = &inputToCellWeights;
1208  params.m_InputToOutputWeights = &inputToOutputWeights;
1209  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1210  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1211  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1212  params.m_ForgetGateBias = &forgetGateBias;
1213  params.m_CellBias = &cellBias;
1214  params.m_OutputGateBias = &outputGateBias;
1215 
1216  params.m_CellToForgetWeights = &cellToForgetWeights;
1217  params.m_CellToOutputWeights = &cellToOutputWeights;
1218 
1219  TestLstmLayerVisitor visitor(descriptor, params, layerName);
1220 
1221  NetworkImpl net;
1222 
1223  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
1224  layer->ExecuteStrategy(visitor);
1225 }
1226 
 1227 // LSTM tests with projection enabled follow.
1228 TEST_CASE("CheckLstmLayerProjection")
1229 {
1230  LstmDescriptor descriptor;
1231  descriptor.m_ActivationFunc = 3;
1232  descriptor.m_ClippingThresProj = 0.5f;
1233  descriptor.m_ClippingThresCell = 0.3f;
1234  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
1235  descriptor.m_ProjectionEnabled = true;
1236 
1237  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1238  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1239  ConstTensor inputToForgetWeights(
1240  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1241  inputToForgetWeightsData);
1242 
1243  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1244  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1245  ConstTensor inputToCellWeights(
1246  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1247  inputToCellWeightsData);
1248 
1249  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1250  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1251  ConstTensor inputToOutputWeights(
1252  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1253  inputToOutputWeightsData);
1254 
1255  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1256  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1257  ConstTensor recurrentToForgetWeights(
1258  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1259  recurrentToForgetWeightsData);
1260 
1261  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1262  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1263  ConstTensor recurrentToCellWeights(
1264  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1265  recurrentToCellWeightsData);
1266 
1267  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1268  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1269  ConstTensor recurrentToOutputWeights(
1270  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1271  recurrentToOutputWeightsData);
1272 
1273  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1274  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1275  ConstTensor forgetGateBias(
1276  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1277  forgetGateBiasData);
1278 
1279  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1280  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1281  ConstTensor cellBias(
1282  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1283  cellBiasData);
1284 
1285  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1286  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1287  ConstTensor outputGateBias(
1288  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1289  outputGateBiasData);
1290 
1291  std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1292  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1293  ConstTensor projectionBias(
1294  TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1295  projectionBiasData);
1296 
1297  std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1298  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1299  ConstTensor projectionWeights(
1300  TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1301  projectionWeightsData);
1302 
1303  LstmInputParams params;
1304  params.m_InputToForgetWeights = &inputToForgetWeights;
1305  params.m_InputToCellWeights = &inputToCellWeights;
1306  params.m_InputToOutputWeights = &inputToOutputWeights;
1307  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1308  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1309  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1310  params.m_ForgetGateBias = &forgetGateBias;
1311  params.m_CellBias = &cellBias;
1312  params.m_OutputGateBias = &outputGateBias;
1313 
1314  params.m_ProjectionWeights = &projectionWeights;
1315  params.m_ProjectionBias = &projectionBias;
1316 
1317  TestLstmLayerVisitor visitor(descriptor, params);
1318 
1319  NetworkImpl net;
1320 
1321  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
1322  layer->ExecuteStrategy(visitor);
1323 }
1324 
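// As with the other "Named" tests, the layer name is passed to both the visitor and AddLstmLayer,
// so name propagation is verified in addition to the descriptor and parameters.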
1325 TEST_CASE("CheckNamedLstmLayerProjection")
1326 {
1327  const char* layerName = "LstmLayer";
1328  LstmDescriptor descriptor;
1329  descriptor.m_ActivationFunc = 3;
1330  descriptor.m_ClippingThresProj = 0.5f;
1331  descriptor.m_ClippingThresCell = 0.3f;
1332  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
1333  descriptor.m_ProjectionEnabled = true;
1334 
1335  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1336  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1337  ConstTensor inputToForgetWeights(
1338  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1339  inputToForgetWeightsData);
1340 
1341  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1342  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1343  ConstTensor inputToCellWeights(
1344  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1345  inputToCellWeightsData);
1346 
1347  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1348  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1349  ConstTensor inputToOutputWeights(
1350  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1351  inputToOutputWeightsData);
1352 
1353  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1354  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1355  ConstTensor recurrentToForgetWeights(
1356  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1357  recurrentToForgetWeightsData);
1358 
1359  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1360  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1361  ConstTensor recurrentToCellWeights(
1362  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1363  recurrentToCellWeightsData);
1364 
1365  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1366  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1367  ConstTensor recurrentToOutputWeights(
1368  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1369  recurrentToOutputWeightsData);
1370 
1371  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1372  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1373  ConstTensor forgetGateBias(
1374  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1375  forgetGateBiasData);
1376 
1377  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1378  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1379  ConstTensor cellBias(
1380  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1381  cellBiasData);
1382 
1383  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1384  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1385  ConstTensor outputGateBias(
1386  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1387  outputGateBiasData);
1388 
1389  std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1390  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1391  ConstTensor projectionBias(
1392  TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1393  projectionBiasData);
1394 
1395  std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1396  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1397  ConstTensor projectionWeights(
1398  TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1399  projectionWeightsData);
1400 
1401  LstmInputParams params;
1402  params.m_InputToForgetWeights = &inputToForgetWeights;
1403  params.m_InputToCellWeights = &inputToCellWeights;
1404  params.m_InputToOutputWeights = &inputToOutputWeights;
1405  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1406  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1407  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1408  params.m_ForgetGateBias = &forgetGateBias;
1409  params.m_CellBias = &cellBias;
1410  params.m_OutputGateBias = &outputGateBias;
1411 
1412  params.m_ProjectionWeights = &projectionWeights;
1413  params.m_ProjectionBias = &projectionBias;
1414 
1415  TestLstmLayerVisitor visitor(descriptor, params, layerName);
1416 
1417  NetworkImpl net;
1418 
1419  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
1420  layer->ExecuteStrategy(visitor);
1421 }
1422 
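// QLstm tests: weights are quantized (QSymmS8) and biases use Signed32. With m_CifgEnabled set,
// only the forget, cell and output gate parameters are supplied.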
1423 TEST_CASE("CheckQLstmLayerBasic")
1424 {
1425  QLstmDescriptor descriptor;
1426  descriptor.m_ProjectionClip = 0.5f;
1427  descriptor.m_CellClip = 0.3f;
1428  descriptor.m_CifgEnabled = true;
1429 
1430  // Basic params ONLY
1431  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1432  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1433  ConstTensor inputToForgetWeights(
1434  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1435  inputToForgetWeightsData);
1436 
1437  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1438  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1439  ConstTensor inputToCellWeights(
1440  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1441  inputToCellWeightsData);
1442 
1443  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1444  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1445  ConstTensor inputToOutputWeights(
1446  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1447  inputToOutputWeightsData);
1448 
1449  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1450  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1451  ConstTensor recurrentToForgetWeights(
1452  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1453  recurrentToForgetWeightsData);
1454 
1455  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1456  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1457  ConstTensor recurrentToCellWeights(
1458  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1459  recurrentToCellWeightsData);
1460 
1461  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1462  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1463  ConstTensor recurrentToOutputWeights(
1464  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1465  recurrentToOutputWeightsData);
1466 
1467  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1468  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1469  ConstTensor forgetGateBias(
1470  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1471  forgetGateBiasData);
1472 
1473  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1474  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1475  ConstTensor cellBias(
1476  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1477  cellBiasData);
1478 
1479  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1480  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1481  ConstTensor outputGateBias(
1482  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1483  outputGateBiasData);
1484 
1485  LstmInputParams params;
1486  params.m_InputToForgetWeights = &inputToForgetWeights;
1487  params.m_InputToCellWeights = &inputToCellWeights;
1488  params.m_InputToOutputWeights = &inputToOutputWeights;
1489  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1490  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1491  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1492  params.m_ForgetGateBias = &forgetGateBias;
1493  params.m_CellBias = &cellBias;
1494  params.m_OutputGateBias = &outputGateBias;
1495 
1496  TestQLstmLayerVisitor visitor(descriptor, params);
1497 
1498  NetworkImpl net;
1499 
1500  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1501  layer->ExecuteStrategy(visitor);
1502 }
1503 
1504 TEST_CASE("CheckNamedQLstmLayerBasic")
1505 {
1506  const char* layerName = "QLstmLayer";
1507  QLstmDescriptor descriptor;
1508  descriptor.m_ProjectionClip = 0.5f;
1509  descriptor.m_CellClip = 0.3f;
1510  descriptor.m_CifgEnabled = true;
1511 
1512  // Basic params ONLY
1513  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1514  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1515  ConstTensor inputToForgetWeights(
1516  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1517  inputToForgetWeightsData);
1518 
1519  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1520  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1521  ConstTensor inputToCellWeights(
1522  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1523  inputToCellWeightsData);
1524 
1525  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1526  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1527  ConstTensor inputToOutputWeights(
1528  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1529  inputToOutputWeightsData);
1530 
1531  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1532  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1533  ConstTensor recurrentToForgetWeights(
1534  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1535  recurrentToForgetWeightsData);
1536 
1537  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1538  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1539  ConstTensor recurrentToCellWeights(
1540  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1541  recurrentToCellWeightsData);
1542 
1543  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1544  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1545  ConstTensor recurrentToOutputWeights(
1546  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1547  recurrentToOutputWeightsData);
1548 
1549  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1550  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1551  ConstTensor forgetGateBias(
1552  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1553  forgetGateBiasData);
1554 
1555  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1556  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1557  ConstTensor cellBias(
1558  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1559  cellBiasData);
1560 
1561  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1562  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1563  ConstTensor outputGateBias(
1564  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1565  outputGateBiasData);
1566 
1567  LstmInputParams params;
1568  params.m_InputToForgetWeights = &inputToForgetWeights;
1569  params.m_InputToCellWeights = &inputToCellWeights;
1570  params.m_InputToOutputWeights = &inputToOutputWeights;
1571  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1572  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1573  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1574  params.m_ForgetGateBias = &forgetGateBias;
1575  params.m_CellBias = &cellBias;
1576  params.m_OutputGateBias = &outputGateBias;
1577 
1578  TestQLstmLayerVisitor visitor(descriptor, params, layerName);
1579 
1580  NetworkImpl net;
1581 
1582  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
1583  layer->ExecuteStrategy(visitor);
1584 }
1585 
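// With m_CifgEnabled = false, the input gate parameters (input-to-input weights,
// recurrent-to-input weights and the input gate bias) are supplied in addition to the basic parameters.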
1586 TEST_CASE("CheckQLstmLayerCifgDisabled")
1587 {
1588  QLstmDescriptor descriptor;
1589  descriptor.m_ProjectionClip = 0.5f;
1590  descriptor.m_CellClip = 0.3f;
1591  descriptor.m_CifgEnabled = false;
1592 
1593  // Basic params
1594  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1595  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1596  ConstTensor inputToForgetWeights(
1597  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1598  inputToForgetWeightsData);
1599 
1600  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1601  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1602  ConstTensor inputToCellWeights(
1603  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1604  inputToCellWeightsData);
1605 
1606  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1607  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1608  ConstTensor inputToOutputWeights(
1609  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1610  inputToOutputWeightsData);
1611 
1612  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1613  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1614  ConstTensor recurrentToForgetWeights(
1615  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1616  recurrentToForgetWeightsData);
1617 
1618  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1619  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1620  ConstTensor recurrentToCellWeights(
1621  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1622  recurrentToCellWeightsData);
1623 
1624  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1625  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1626  ConstTensor recurrentToOutputWeights(
1627  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1628  recurrentToOutputWeightsData);
1629 
1630  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1631  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1632  ConstTensor forgetGateBias(
1633  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1634  forgetGateBiasData);
1635 
1636  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1637  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1638  ConstTensor cellBias(
1639  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1640  cellBiasData);
1641 
1642  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1643  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1644  ConstTensor outputGateBias(
1645  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1646  outputGateBiasData);
1647 
1648  // CIFG disabled params
1649  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1650  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1651  ConstTensor inputToInputWeights(
1652  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1653  inputToInputWeightsData);
1654 
1655  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1656  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1657  ConstTensor recurrentToInputWeights(
1658  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1659  recurrentToInputWeightsData);
1660 
1661  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1662  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1663  ConstTensor inputGateBias(
1664  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1665  inputGateBiasData);
1666 
1667  LstmInputParams params;
1668 
1669  // Basic params
1670  params.m_InputToForgetWeights = &inputToForgetWeights;
1671  params.m_InputToCellWeights = &inputToCellWeights;
1672  params.m_InputToOutputWeights = &inputToOutputWeights;
1673  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1674  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1675  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1676  params.m_ForgetGateBias = &forgetGateBias;
1677  params.m_CellBias = &cellBias;
1678  params.m_OutputGateBias = &outputGateBias;
1679 
1680  // CIFG disabled params
1681  params.m_InputToInputWeights = &inputToInputWeights;
1682  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1683  params.m_InputGateBias = &inputGateBias;
1684 
1685  TestQLstmLayerVisitor visitor(descriptor, params);
1686 
1687  NetworkImpl net;
1688 
1689  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1690  layer->ExecuteStrategy(visitor);
1691 }
1692 
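// Peephole connections add cell-to-gate weights (QSymmS16); because CIFG is disabled here,
// cell-to-input weights are included alongside cell-to-forget and cell-to-output.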
1693 TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
1694 {
1695  QLstmDescriptor descriptor;
1696  descriptor.m_ProjectionClip = 0.5f;
1697  descriptor.m_CellClip = 0.3f;
1698  descriptor.m_CifgEnabled = false;
1699  descriptor.m_PeepholeEnabled = true;
1700 
1701  // Basic params
1702  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1703  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1704  ConstTensor inputToForgetWeights(
1705  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1706  inputToForgetWeightsData);
1707 
1708  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1709  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1710  ConstTensor inputToCellWeights(
1711  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1712  inputToCellWeightsData);
1713 
1714  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1715  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1716  ConstTensor inputToOutputWeights(
1717  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1718  inputToOutputWeightsData);
1719 
1720  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1721  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1722  ConstTensor recurrentToForgetWeights(
1723  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1724  recurrentToForgetWeightsData);
1725 
1726  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1727  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1728  ConstTensor recurrentToCellWeights(
1729  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1730  recurrentToCellWeightsData);
1731 
1732  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1733  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1734  ConstTensor recurrentToOutputWeights(
1735  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1736  recurrentToOutputWeightsData);
1737 
1738  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1739  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1740  ConstTensor forgetGateBias(
1741  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1742  forgetGateBiasData);
1743 
1744  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1745  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1746  ConstTensor cellBias(
1747  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1748  cellBiasData);
1749 
1750  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1751  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1752  ConstTensor outputGateBias(
1753  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1754  outputGateBiasData);
1755 
1756  // CIFG disabled params
1757  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1758  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1759  ConstTensor inputToInputWeights(
1760  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1761  inputToInputWeightsData);
1762 
1763  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1764  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1765  ConstTensor recurrentToInputWeights(
1766  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1767  recurrentToInputWeightsData);
1768 
1769  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1770  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1771  ConstTensor inputGateBias(
1772  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1773  inputGateBiasData);
1774 
1775  // Peephole enabled, CIFG disabled params
1776  std::vector<int16_t> cellToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1777  std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
1778  ConstTensor cellToInputWeights(
1779  TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1780  cellToInputWeightsData);
1781 
1782  std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1783  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1784  ConstTensor cellToForgetWeights(
1785  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1786  cellToForgetWeightsData);
1787 
1788  std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1789  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1790  ConstTensor cellToOutputWeights(
1791  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1792  cellToOutputWeightsData);
1793 
1794  LstmInputParams params;
1795 
1796  // Basic params
1797  params.m_InputToForgetWeights = &inputToForgetWeights;
1798  params.m_InputToCellWeights = &inputToCellWeights;
1799  params.m_InputToOutputWeights = &inputToOutputWeights;
1800  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1801  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1802  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1803  params.m_ForgetGateBias = &forgetGateBias;
1804  params.m_CellBias = &cellBias;
1805  params.m_OutputGateBias = &outputGateBias;
1806 
1807  // CIFG disabled params
1808  params.m_InputToInputWeights = &inputToInputWeights;
1809  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1810  params.m_InputGateBias = &inputGateBias;
1811 
1812  // Peephole enabled, CIFG disabled params
1813  params.m_CellToInputWeights = &cellToInputWeights;
1814  params.m_CellToForgetWeights = &cellToForgetWeights;
1815  params.m_CellToOutputWeights = &cellToOutputWeights;
1816 
1817  TestQLstmLayerVisitor visitor(descriptor, params);
1818 
1819  NetworkImpl net;
1820 
1821  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1822  layer->ExecuteStrategy(visitor);
1823 }
1824 
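// With CIFG enabled the input gate is coupled to the forget gate, so only the
// cell-to-forget and cell-to-output peephole weights are supplied.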
1825 TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
1826 {
1827  QLstmDescriptor descriptor;
1828  descriptor.m_ProjectionClip = 0.5f;
1829  descriptor.m_CellClip = 0.3f;
1830  descriptor.m_CifgEnabled = true;
1831  descriptor.m_PeepholeEnabled = true;
1832 
1833  // Basic params
1834  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1835  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1836  ConstTensor inputToForgetWeights(
1837  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1838  inputToForgetWeightsData);
1839 
1840  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1841  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1842  ConstTensor inputToCellWeights(
1843  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1844  inputToCellWeightsData);
1845 
1846  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1847  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1848  ConstTensor inputToOutputWeights(
1849  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1850  inputToOutputWeightsData);
1851 
1852  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1853  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1854  ConstTensor recurrentToForgetWeights(
1855  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1856  recurrentToForgetWeightsData);
1857 
1858  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1859  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1860  ConstTensor recurrentToCellWeights(
1861  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1862  recurrentToCellWeightsData);
1863 
1864  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1865  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1866  ConstTensor recurrentToOutputWeights(
1867  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1868  recurrentToOutputWeightsData);
1869 
1870  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1871  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1872  ConstTensor forgetGateBias(
1873  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1874  forgetGateBiasData);
1875 
1876  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1877  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1878  ConstTensor cellBias(
1879  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1880  cellBiasData);
1881 
1882  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1883  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1884  ConstTensor outputGateBias(
1885  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1886  outputGateBiasData);
1887 
1888  // Peephole enabled and CIFG enabled params
1889  std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1890  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1891  ConstTensor cellToForgetWeights(
1892  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1893  cellToForgetWeightsData);
1894 
1895  std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1896  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1897  ConstTensor cellToOutputWeights(
1898  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1899  cellToOutputWeightsData);
1900 
1901  LstmInputParams params;
1902 
1903  // Basic params
1904  params.m_InputToForgetWeights = &inputToForgetWeights;
1905  params.m_InputToCellWeights = &inputToCellWeights;
1906  params.m_InputToOutputWeights = &inputToOutputWeights;
1907  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1908  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1909  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1910  params.m_ForgetGateBias = &forgetGateBias;
1911  params.m_CellBias = &cellBias;
1912  params.m_OutputGateBias = &outputGateBias;
1913 
1914  // Peephole enabled and CIFG enabled params
1915  params.m_CellToForgetWeights = &cellToForgetWeights;
1916  params.m_CellToOutputWeights = &cellToOutputWeights;
1917 
1918  TestQLstmLayerVisitor visitor(descriptor, params);
1919 
1920  NetworkImpl net;
1921 
1922  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1923  layer->ExecuteStrategy(visitor);
1924 }
1925 
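// Projection adds QSymmS8 projection weights and a Signed32 projection bias on top of the basic parameters.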
1926 TEST_CASE("CheckQLstmLayerProjectionEnabled")
1927 {
1928  QLstmDescriptor descriptor;
1929  descriptor.m_ProjectionClip = 0.5f;
1930  descriptor.m_CellClip = 0.3f;
1931  descriptor.m_CifgEnabled = true;
1932  descriptor.m_ProjectionEnabled = true;
1933 
1934  // Basic params ONLY
1935  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1936  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1937  ConstTensor inputToForgetWeights(
1938  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1939  inputToForgetWeightsData);
1940 
1941  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1942  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1943  ConstTensor inputToCellWeights(
1944  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1945  inputToCellWeightsData);
1946 
1947  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1948  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1949  ConstTensor inputToOutputWeights(
1950  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1951  inputToOutputWeightsData);
1952 
1953  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1954  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1955  ConstTensor recurrentToForgetWeights(
1956  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1957  recurrentToForgetWeightsData);
1958 
1959  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1960  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1961  ConstTensor recurrentToCellWeights(
1962  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1963  recurrentToCellWeightsData);
1964 
1965  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1966  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1967  ConstTensor recurrentToOutputWeights(
1968  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1969  recurrentToOutputWeightsData);
1970 
1971  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1972  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1973  ConstTensor forgetGateBias(
1974  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1975  forgetGateBiasData);
1976 
1977  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1978  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1979  ConstTensor cellBias(
1980  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1981  cellBiasData);
1982 
1983  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1984  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1985  ConstTensor outputGateBias(
1986  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1987  outputGateBiasData);
1988 
1989  // Projection enabled params
1990  std::vector<uint8_t> projectionWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1991  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1992  ConstTensor projectionWeights(
1993  TensorInfo(4, projectionWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1994  projectionWeightsData);
1995 
1996  std::vector<int32_t> projectionBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1997  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1998  ConstTensor projectionBias(
1999  TensorInfo(4, projectionBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2000  projectionBiasData);
2001 
2002  LstmInputParams params;
2003 
2004  // Basic params
2005  params.m_InputToForgetWeights = &inputToForgetWeights;
2006  params.m_InputToCellWeights = &inputToCellWeights;
2007  params.m_InputToOutputWeights = &inputToOutputWeights;
2008  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2009  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2010  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2011  params.m_ForgetGateBias = &forgetGateBias;
2012  params.m_CellBias = &cellBias;
2013  params.m_OutputGateBias = &outputGateBias;
2014 
2015  // Projection enabled params
2016  params.m_ProjectionWeights = &projectionWeights;
2017  params.m_ProjectionBias = &projectionBias;
2018 
2019  TestQLstmLayerVisitor visitor(descriptor, params);
2020 
2021  NetworkImpl net;
2022 
2023  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
2024  layer->ExecuteStrategy(visitor);
2025 }
2026 
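// Layer normalization adds per-gate QSymmS16 normalization weights; with CIFG disabled
// the input layer-norm weights are included as well.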
2027 TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
2028 {
2029  QLstmDescriptor descriptor;
2030  descriptor.m_ProjectionClip = 0.5f;
2031  descriptor.m_CellClip = 0.3f;
2032  descriptor.m_CifgEnabled = false;
2033  descriptor.m_LayerNormEnabled = true;
2034 
2035  // Basic params
2036  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2037  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
2038  ConstTensor inputToForgetWeights(
2039  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2040  inputToForgetWeightsData);
2041 
2042  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2043  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
2044  ConstTensor inputToCellWeights(
2045  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2046  inputToCellWeightsData);
2047 
2048  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2049  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2050  ConstTensor inputToOutputWeights(
2051  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2052  inputToOutputWeightsData);
2053 
2054  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2055  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2056  ConstTensor recurrentToForgetWeights(
2057  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2058  recurrentToForgetWeightsData);
2059 
2060  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2061  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2062  ConstTensor recurrentToCellWeights(
2063  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2064  recurrentToCellWeightsData);
2065 
2066  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2067  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2068  ConstTensor recurrentToOutputWeights(
2069  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2070  recurrentToOutputWeightsData);
2071 
2072  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2073  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2074  ConstTensor forgetGateBias(
2075  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2076  forgetGateBiasData);
2077 
2078  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2079  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2080  ConstTensor cellBias(
2081  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2082  cellBiasData);
2083 
2084  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2085  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2086  ConstTensor outputGateBias(
2087  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2088  outputGateBiasData);
2089 
2090  // CIFG disabled params
2091  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2092  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
2093  ConstTensor inputToInputWeights(
2094  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2095  inputToInputWeightsData);
2096 
2097  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2098  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
2099  ConstTensor recurrentToInputWeights(
2100  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2101  recurrentToInputWeightsData);
2102 
2103  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2104  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
2105  ConstTensor inputGateBias(
2106  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2107  inputGateBiasData);
2108 
2109  // Layer Norm enabled, CIFG disabled params
2110  std::vector<int16_t> inputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2111  std::vector<unsigned int> inputLayerNormWeightsDimensions = {1, 1, 3, 3};
2112  ConstTensor inputLayerNormWeights(
2113  TensorInfo(4, inputLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2114  inputLayerNormWeightsData);
2115 
2116  std::vector<int16_t> forgetLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2117  std::vector<unsigned int> forgetLayerNormWeightsDimensions = {1, 1, 3, 3};
2118  ConstTensor forgetLayerNormWeights(
2119  TensorInfo(4, forgetLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2120  forgetLayerNormWeightsData);
2121 
2122  std::vector<int16_t> cellLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2123  std::vector<unsigned int> cellLayerNormWeightsDimensions = {1, 1, 3, 3};
2124  ConstTensor cellLayerNormWeights(
2125  TensorInfo(4, cellLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2126  cellLayerNormWeightsData);
2127 
2128  std::vector<int16_t> outputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2129  std::vector<unsigned int> outputLayerNormWeightsDimensions = {1, 1, 3, 3};
2130  ConstTensor outputLayerNormWeights(
2131  TensorInfo(4, outputLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2132  outputLayerNormWeightsData);
2133 
2134  LstmInputParams params;
2135 
2136  // Basic params
2137  params.m_InputToForgetWeights = &inputToForgetWeights;
2138  params.m_InputToCellWeights = &inputToCellWeights;
2139  params.m_InputToOutputWeights = &inputToOutputWeights;
2140  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2141  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2142  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2143  params.m_ForgetGateBias = &forgetGateBias;
2144  params.m_CellBias = &cellBias;
2145  params.m_OutputGateBias = &outputGateBias;
2146 
2147  // CIFG disabled params
2148  params.m_InputToInputWeights = &inputToInputWeights;
2149  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
2150  params.m_InputGateBias = &inputGateBias;
2151 
2152  // Layer Norm enabled, CIFG disabled params
2153  params.m_InputLayerNormWeights = &inputLayerNormWeights;
2154  params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
2155  params.m_CellLayerNormWeights = &cellLayerNormWeights;
2156  params.m_OutputLayerNormWeights = &outputLayerNormWeights;
2157 
2158  TestQLstmLayerVisitor visitor(descriptor, params);
2159 
2160  NetworkImpl net;
2161 
2162  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
2163  layer->ExecuteStrategy(visitor);
2164 }
2165 
2166 
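// QuantizedLstm uses QuantizedLstmInputParams rather than LstmInputParams; all four gates
// (input, forget, cell, output) are set, with quantized weights and Signed32 biases.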
2167 TEST_CASE("CheckQuantizedLstmLayer")
2168 {
2169  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2170  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
2171  ConstTensor inputToInputWeights(
2172  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2173  inputToInputWeightsData);
2174 
2175  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2176  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
2177  ConstTensor inputToForgetWeights(
2178  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2179  inputToForgetWeightsData);
2180 
2181  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2182  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
2183  ConstTensor inputToCellWeights(
2184  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2185  inputToCellWeightsData);
2186 
2187  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2188  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2189  ConstTensor inputToOutputWeights(
2190  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2191  inputToOutputWeightsData);
2192 
2193 
2194  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2195  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
2196  ConstTensor recurrentToInputWeights(
2197  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2198  recurrentToInputWeightsData);
2199 
2200  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2201  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2202  ConstTensor recurrentToForgetWeights(
2203  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2204  recurrentToForgetWeightsData);
2205 
2206  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2207  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2208  ConstTensor recurrentToCellWeights(
2209  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2210  recurrentToCellWeightsData);
2211 
2212  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2213  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2214  ConstTensor recurrentToOutputWeights(
2215  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2216  recurrentToOutputWeightsData);
2217 
2218 
2219  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2220  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
2221  ConstTensor inputGateBias(
2222  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2223  inputGateBiasData);
2224 
2225  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2226  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2227  ConstTensor forgetGateBias(
2228  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2229  forgetGateBiasData);
2230 
2231  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2232  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2233  ConstTensor cellBias(
2234  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2235  cellBiasData);
2236 
2237  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2238  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2239  ConstTensor outputGateBias(
2240  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2241  outputGateBiasData);
2242 
2243  QuantizedLstmInputParams params;
2244 
2245  params.m_InputToInputWeights = &inputToInputWeights;
2246  params.m_InputToForgetWeights = &inputToForgetWeights;
2247  params.m_InputToCellWeights = &inputToCellWeights;
2248  params.m_InputToOutputWeights = &inputToOutputWeights;
2249 
2250  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
2251  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2252  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2253  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2254 
2255  params.m_InputGateBias = &inputGateBias;
2256  params.m_ForgetGateBias = &forgetGateBias;
2257  params.m_CellBias = &cellBias;
2258  params.m_OutputGateBias = &outputGateBias;
2259 
2260  TestQuantizedLstmLayerVisitor visitor(params);
2261 
2262  NetworkImpl net;
2263 
2264  IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
2265  layer->ExecuteStrategy(visitor);
2266 }
2267 
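// Named variant of the QuantizedLstm test: the weights here use QAsymmU8 and the layer name
// is passed to both the visitor and AddQuantizedLstmLayer.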
2268 TEST_CASE("CheckNamedQuantizedLstmLayer")
2269 {
2270  const char* layerName = "LstmLayer";
2271  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2272  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
2273  ConstTensor inputToInputWeights(
2274  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2275  inputToInputWeightsData);
2276 
2277  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2278  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
2279  ConstTensor inputToForgetWeights(
2280  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2281  inputToForgetWeightsData);
2282 
2283  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2284  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
2285  ConstTensor inputToCellWeights(
2286  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2287  inputToCellWeightsData);
2288 
2289  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2290  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2291  ConstTensor inputToOutputWeights(
2292  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2293  inputToOutputWeightsData);
2294 
2295 
2296  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2297  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
2298  ConstTensor recurrentToInputWeights(
2299  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2300  recurrentToInputWeightsData);
2301 
2302  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2303  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2304  ConstTensor recurrentToForgetWeights(
2305  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2306  recurrentToForgetWeightsData);
2307 
2308  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2309  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2310  ConstTensor recurrentToCellWeights(
2311  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2312  recurrentToCellWeightsData);
2313 
2314  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2315  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2316  ConstTensor recurrentToOutputWeights(
2317  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2318  recurrentToOutputWeightsData);
2319 
2320 
2321  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2322  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
2323  ConstTensor inputGateBias(
2324  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2325  inputGateBiasData);
2326 
2327  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2328  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2329  ConstTensor forgetGateBias(
2330  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2331  forgetGateBiasData);
2332 
2333  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2334  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2335  ConstTensor cellBias(
2336  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2337  cellBiasData);
2338 
2339  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2340  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2341  ConstTensor outputGateBias(
2342  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2343  outputGateBiasData);
2344 
2345  QuantizedLstmInputParams params;
2346 
2347  params.m_InputToInputWeights = &inputToInputWeights;
2348  params.m_InputToForgetWeights = &inputToForgetWeights;
2349  params.m_InputToCellWeights = &inputToCellWeights;
2350  params.m_InputToOutputWeights = &inputToOutputWeights;
2351 
2352  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
2353  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2354  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2355  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2356 
2357  params.m_InputGateBias = &inputGateBias;
2358  params.m_ForgetGateBias = &forgetGateBias;
2359  params.m_CellBias = &cellBias;
2360  params.m_OutputGateBias = &outputGateBias;
2361 
2362  TestQuantizedLstmLayerVisitor visitor(params, layerName);
2363 
2364  NetworkImpl net;
2365 
2366  IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
2367  layer->ExecuteStrategy(visitor);
2368 }
2369 
2370 }
2371 
2372 } // namespace armnn
TEST_SUITE("TestConstTensorLayerVisitor")
void CheckDescriptor(const BatchNormalizationDescriptor &descriptor)
void CheckDescriptor(const QLstmDescriptor &descriptor)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
bool m_BiasEnabled
Enable/disable bias.
const ConstTensor * m_RecurrentToOutputWeights
uint32_t m_PadBottom
Padding bottom value in the height dimension.
float m_ClippingThresProj
Clipping threshold value for the projection.
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
const ConstTensor * m_RecurrentToForgetWeights
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
void CheckDescriptor(const FullyConnectedDescriptor &descriptor)
bool m_PeepholeEnabled
Enable/disable peephole.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
IConnectableLayer * AddQuantizedLstmLayer(const QuantizedLstmInputParams &params, const char *name=nullptr)
Definition: Network.cpp:2507
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
IConnectableLayer * AddConstantLayer(const ConstTensor &input, const char *name=nullptr)
Definition: Network.cpp:2236
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
uint32_t m_PadRight
Padding right value in the width dimension.
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
Copyright (c) 2021 ARM Limited and Contributors.
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)
Definition: Network.cpp:2082
Private implementation of INetwork.
Definition: Network.hpp:31
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
IConnectableLayer * AddQLstmLayer(const QLstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Definition: Network.cpp:2545
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
bool m_LayerNormEnabled
Enable/disable layer normalization.
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
An LstmDescriptor for the LstmLayer.
uint32_t m_PadTop
Padding top value in the height dimension.
void CheckDescriptor(const LstmDescriptor &descriptor)
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
const ConstTensor * m_InputToForgetWeights
float m_ProjectionClip
Clipping threshold value for the projection.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
bool m_PeepholeEnabled
Enable/disable peephole.
void CheckDescriptor(const Convolution2dDescriptor &convolution2dDescriptor)
A QLstmDescriptor for the QLstmLayer.
uint32_t m_ActivationFunc
The activation function to use.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CheckDescriptor(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor)
const ConstTensor * m_RecurrentToInputWeights
float m_ClippingThresCell
Clipping threshold value for the cell state.
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
float m_CellClip
Clipping threshold value for the cell state.
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
void CheckConstTensorPtrs(const std::string &name, const ConstTensor *expected, const ConstTensor *actual)
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const char *name=nullptr)
Definition: Network.cpp:1912
bool m_ProjectionEnabled
Enable/disable the projection layer.
void CheckInputParameters(const QuantizedLstmInputParams &params)
const ConstTensor * m_RecurrentToCellWeights
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
const ConstTensor * m_InputToOutputWeights
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)
Definition: Network.cpp:2180
virtual int Connect(IInputSlot &destination)=0
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
virtual ARMNN_NO_DEPRECATE_WARN_END void ExecuteStrategy(IStrategy &strategy) const =0
Apply a visitor to this layer.
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const ConstTensor &weights, const Optional< ConstTensor > &biases, const char *name=nullptr)
Definition: Network.cpp:2022
IConnectableLayer * AddLstmLayer(const LstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Definition: Network.cpp:2268
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
uint32_t m_PadRight
Padding right value in the width dimension.
bool m_ConstantWeights
Enable/disable constant weights and biases.
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40