ConstTensorLayerVisitor.cpp (ArmNN 22.05)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConstTensorLayerVisitor.hpp"
#include "Network.hpp"

#include <doctest/doctest.h>

namespace armnn
{

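// The TestXxxLayerVisitor helpers below compare the descriptor captured when the visitor was
// constructed against the descriptor reported by the layer they visit, field by field.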
void TestConvolution2dLayerVisitor::CheckDescriptor(const Convolution2dDescriptor& convolution2dDescriptor)
{
    CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
    CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
    CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
    CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
    CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
    CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
    CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
    CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
}

void TestDepthwiseConvolution2dLayerVisitor::CheckDescriptor(
    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor)
{
    CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
    CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
    CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
    CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
    CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
    CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
    CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
    CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
}

void TestFullyConnectedLayerVistor::CheckDescriptor(const FullyConnectedDescriptor& descriptor)
{
    CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
    CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
}

void TestBatchNormalizationLayerVisitor::CheckDescriptor(const BatchNormalizationDescriptor& descriptor)
{
    CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
    CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
}

void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
{
    CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
    CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
    CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
    CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
    CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
    CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}

void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
{
    CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
    CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
    CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
    CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
    CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
}

void TestQuantizedLstmLayerVisitor::CheckInputParameters(const QuantizedLstmInputParams& inputParams)
{
    CheckConstTensorPtrs("InputToInputWeights",
                         m_InputParams.m_InputToInputWeights,
                         inputParams.m_InputToInputWeights);

    CheckConstTensorPtrs("InputToForgetWeights",
                         m_InputParams.m_InputToForgetWeights,
                         inputParams.m_InputToForgetWeights);

    CheckConstTensorPtrs("InputToCellWeights",
                         m_InputParams.m_InputToCellWeights,
                         inputParams.m_InputToCellWeights);

    CheckConstTensorPtrs("InputToOutputWeights",
                         m_InputParams.m_InputToOutputWeights,
                         inputParams.m_InputToOutputWeights);

    CheckConstTensorPtrs("RecurrentToInputWeights",
                         m_InputParams.m_RecurrentToInputWeights,
                         inputParams.m_RecurrentToInputWeights);

    CheckConstTensorPtrs("RecurrentToForgetWeights",
                         m_InputParams.m_RecurrentToForgetWeights,
                         inputParams.m_RecurrentToForgetWeights);

    CheckConstTensorPtrs("RecurrentToCellWeights",
                         m_InputParams.m_RecurrentToCellWeights,
                         inputParams.m_RecurrentToCellWeights);

    CheckConstTensorPtrs("RecurrentToOutputWeights",
                         m_InputParams.m_RecurrentToOutputWeights,
                         inputParams.m_RecurrentToOutputWeights);

    CheckConstTensorPtrs("InputGateBias", m_InputParams.m_InputGateBias, inputParams.m_InputGateBias);
    CheckConstTensorPtrs("ForgetGateBias", m_InputParams.m_ForgetGateBias, inputParams.m_ForgetGateBias);
    CheckConstTensorPtrs("CellBias", m_InputParams.m_CellBias, inputParams.m_CellBias);
    CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
}

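// The tests below all follow the same pattern: build a NetworkImpl, add the layer under test
// (plus ConstantLayers feeding its weight/bias input slots where required), then replay each
// layer through ExecuteStrategy with a visitor that asserts on the descriptor and tensor data.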
TEST_SUITE("TestConstTensorLayerVisitor")
{
TEST_CASE("CheckConvolution2dLayer")
{
    Convolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = false;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConvolution2dLayerVisitor visitor(descriptor);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedConvolution2dLayer")
{
    const char* layerName = "Convolution2dLayer";
    Convolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConvolution2dLayerVisitor visitor(descriptor, layerName);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, layerName);

    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckConvolution2dLayerWithBiases")
{
    Convolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = true;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
    ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasVisitor(biases);
    TestConvolution2dLayerVisitor visitor(descriptor);

    NetworkImpl net;
    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasLayer = net.AddConstantLayer(biases);
    IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor);

    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    biasLayer->ExecuteStrategy(biasVisitor);
    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
{
    const char* layerName = "Convolution2dLayer";
    Convolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = true;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
    ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasVisitor(biases);
    TestConvolution2dLayerVisitor visitor(descriptor, layerName);

    NetworkImpl net;
    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasLayer = net.AddConstantLayer(biases);
    IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, layerName);

    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    biasLayer->ExecuteStrategy(biasVisitor);
    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckDepthwiseConvolution2dLayer")
{
    DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    NetworkImpl net;

    TestConstantLayerVisitor weightsVisitor(weights);
    TestDepthwiseConvolution2dLayerVisitor visitor(descriptor);

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
{
    const char* layerName = "DepthwiseConvolution2dLayer";
    DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    NetworkImpl net;

    TestConstantLayerVisitor weightsVisitor(weights);
    TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, layerName);

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, layerName);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
{
    DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = true;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
    ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasesVisitor(biases);
    TestDepthwiseConvolution2dLayerVisitor visitor(descriptor);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
    IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    biasesLayer->ExecuteStrategy(biasesVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
{
    const char* layerName = "DepthwiseConvolution2dLayer";
    DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 3;
    descriptor.m_PadBottom = 1;
    descriptor.m_PadTop = 5;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 3;
    descriptor.m_DataLayout = DataLayout::NHWC;
    descriptor.m_BiasEnabled = true;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
    ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasesVisitor(biases);
    TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, layerName);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
    IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, layerName);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    biasesLayer->ExecuteStrategy(biasesVisitor);
    layer->ExecuteStrategy(visitor);
}

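// FullyConnected follows the same constant-layer pattern: weights are connected to input
// slot 1 and, when m_BiasEnabled is set, biases to input slot 2.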
TEST_CASE("CheckFullyConnectedLayer")
{
    FullyConnectedDescriptor descriptor;
    descriptor.m_TransposeWeightMatrix = true;
    descriptor.m_ConstantWeights = true;
    descriptor.m_BiasEnabled = false;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestFullyConnectedLayerVistor visitor(descriptor);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedFullyConnectedLayer")
{
    const char* layerName = "FullyConnectedLayer";
    FullyConnectedDescriptor descriptor;
    descriptor.m_TransposeWeightMatrix = true;
    descriptor.m_ConstantWeights = true;
    descriptor.m_BiasEnabled = false;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestFullyConnectedLayerVistor visitor(descriptor, layerName);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckFullyConnectedLayerWithBiases")
{
    FullyConnectedDescriptor descriptor;
    descriptor.m_TransposeWeightMatrix = true;
    descriptor.m_ConstantWeights = true;
    descriptor.m_BiasEnabled = true;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
    ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasesVisitor(biases);
    TestFullyConnectedLayerVistor visitor(descriptor);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
    IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    biasesLayer->ExecuteStrategy(biasesVisitor);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
{
    const char* layerName = "FullyConnectedLayer";
    FullyConnectedDescriptor descriptor;
    descriptor.m_TransposeWeightMatrix = true;
    descriptor.m_ConstantWeights = true;
    descriptor.m_BiasEnabled = true;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor weights(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> biasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> biasDimensions = {1, 1, 3, 3};
    ConstTensor biases(TensorInfo(4, biasDimensions.data(), DataType::Float32, 0.0f, 0, true), biasData);

    TestConstantLayerVisitor weightsVisitor(weights);
    TestConstantLayerVisitor biasesVisitor(biases);
    TestFullyConnectedLayerVistor visitor(descriptor, layerName);

    NetworkImpl net;

    IConnectableLayer* const weightsLayer = net.AddConstantLayer(weights);
    IConnectableLayer* const biasesLayer = net.AddConstantLayer(biases);
    IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, layerName);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    biasesLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2));

    weightsLayer->ExecuteStrategy(weightsVisitor);
    biasesLayer->ExecuteStrategy(biasesVisitor);
    layer->ExecuteStrategy(visitor);
}

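// BatchNormalization takes its constant tensors (mean, variance, beta, gamma) directly as
// arguments to AddBatchNormalizationLayer rather than through ConstantLayer inputs.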
TEST_CASE("CheckBatchNormalizationLayer")
{
    BatchNormalizationDescriptor descriptor;
    descriptor.m_Eps = 0.0002f;
    descriptor.m_DataLayout = DataLayout::NHWC;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
    ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32, 0.0f, 0, true), varianceData);

    std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
    ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32, 0.0f, 0, true), betaData);

    std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
    ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32, 0.0f, 0, true), gammaData);

    TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedBatchNormalizationLayer")
{
    const char* layerName = "BatchNormalizationLayer";
    BatchNormalizationDescriptor descriptor;
    descriptor.m_Eps = 0.0002f;
    descriptor.m_DataLayout = DataLayout::NHWC;

    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor mean(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    std::vector<float> varianceData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> varianceDimensions = {1, 1, 3, 3};
    ConstTensor variance(TensorInfo(4, varianceDimensions.data(), DataType::Float32, 0.0f, 0, true), varianceData);

    std::vector<float> betaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> betaDimensions = {1, 1, 3, 3};
    ConstTensor beta(TensorInfo(4, betaDimensions.data(), DataType::Float32, 0.0f, 0, true), betaData);

    std::vector<float> gammaData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> gammaDimensions = {1, 1, 3, 3};
    ConstTensor gamma(TensorInfo(4, gammaDimensions.data(), DataType::Float32, 0.0f, 0, true), gammaData);

    TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
        descriptor, mean, variance, beta, gamma, layerName);
    layer->ExecuteStrategy(visitor);
}

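// A ConstantLayer carries its tensor itself, so a single TestConstantLayerVisitor is enough here.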
TEST_CASE("CheckConstLayer")
{
    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    TestConstantLayerVisitor visitor(input);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddConstantLayer(input);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedConstLayer")
{
    const char* layerName = "ConstantLayer";
    std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> dimensions = {1, 1, 3, 3};
    ConstTensor input(TensorInfo(4, dimensions.data(), DataType::Float32, 0.0f, 0, true), data);

    TestConstantLayerVisitor visitor(input, layerName);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
    layer->ExecuteStrategy(visitor);
}

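// Basic LSTM: with CIFG enabled only the nine mandatory tensors (input and recurrent weights
// plus biases for the forget, cell and output gates) need to be provided in LstmInputParams.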
TEST_CASE("CheckLstmLayerBasic")
{
    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc = 3;
    descriptor.m_ClippingThresProj = 0.5f;
    descriptor.m_ClippingThresCell = 0.3f;
    descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams

    std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToForgetWeights(
        TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToForgetWeightsData);

    std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToCellWeights(
        TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToCellWeightsData);

    std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToOutputWeights(
        TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToOutputWeightsData);

    std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToForgetWeights(
        TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToForgetWeightsData);

    std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToCellWeights(
        TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToCellWeightsData);

    std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToOutputWeights(
        TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToOutputWeightsData);

    std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor forgetGateBias(
        TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        forgetGateBiasData);

    std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
    ConstTensor cellBias(
        TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellBiasData);

    std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor outputGateBias(
        TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        outputGateBiasData);

    LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeights;
    params.m_InputToCellWeights = &inputToCellWeights;
    params.m_InputToOutputWeights = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias = &forgetGateBias;
    params.m_CellBias = &cellBias;
    params.m_OutputGateBias = &outputGateBias;

    TestLstmLayerVisitor visitor(descriptor, params);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedLstmLayerBasic")
{
    const char* layerName = "LstmLayer";
    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc = 3;
    descriptor.m_ClippingThresProj = 0.5f;
    descriptor.m_ClippingThresCell = 0.3f;
    descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams

    std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToForgetWeights(
        TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToForgetWeightsData);

    std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToCellWeights(
        TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToCellWeightsData);

    std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToOutputWeights(
        TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToOutputWeightsData);

    std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToForgetWeights(
        TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToForgetWeightsData);

    std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToCellWeights(
        TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToCellWeightsData);

    std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToOutputWeights(
        TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToOutputWeightsData);

    std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor forgetGateBias(
        TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        forgetGateBiasData);

    std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
    ConstTensor cellBias(
        TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellBiasData);

    std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor outputGateBias(
        TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        outputGateBiasData);

    LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeights;
    params.m_InputToCellWeights = &inputToCellWeights;
    params.m_InputToOutputWeights = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias = &forgetGateBias;
    params.m_CellBias = &cellBias;
    params.m_OutputGateBias = &outputGateBias;

    TestLstmLayerVisitor visitor(descriptor, params, layerName);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
    layer->ExecuteStrategy(visitor);
}

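// With CIFG disabled the input gate is real, so InputToInputWeights, RecurrentToInputWeights
// and InputGateBias must be supplied as well.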
TEST_CASE("CheckLstmLayerCifgDisabled")
{
    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc = 3;
    descriptor.m_ClippingThresProj = 0.5f;
    descriptor.m_ClippingThresCell = 0.3f;
    descriptor.m_CifgEnabled = false; // if this is false then we DO need to set the OptCifgParams

    std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToForgetWeights(
        TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToForgetWeightsData);

    std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToCellWeights(
        TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToCellWeightsData);

    std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToOutputWeights(
        TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToOutputWeightsData);

    std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToForgetWeights(
        TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToForgetWeightsData);

    std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToCellWeights(
        TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToCellWeightsData);

    std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToOutputWeights(
        TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToOutputWeightsData);

    std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor forgetGateBias(
        TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        forgetGateBiasData);

    std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
    ConstTensor cellBias(
        TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellBiasData);

    std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor outputGateBias(
        TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        outputGateBiasData);

    std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToInputWeights(
        TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToInputWeightsData);

    std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToInputWeights(
        TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToInputWeightsData);

    std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor inputGateBias(
        TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputGateBiasData);

    LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeights;
    params.m_InputToCellWeights = &inputToCellWeights;
    params.m_InputToOutputWeights = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias = &forgetGateBias;
    params.m_CellBias = &cellBias;
    params.m_OutputGateBias = &outputGateBias;

    params.m_InputToInputWeights = &inputToInputWeights;
    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
    params.m_InputGateBias = &inputGateBias;

    TestLstmLayerVisitor visitor(descriptor, params);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
    layer->ExecuteStrategy(visitor);
}

TEST_CASE("CheckNamedLstmLayerCifgDisabled")
{
    const char* layerName = "LstmLayer";
    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc = 3;
    descriptor.m_ClippingThresProj = 0.5f;
    descriptor.m_ClippingThresCell = 0.3f;
    descriptor.m_CifgEnabled = false; // if this is false then we DO need to set the OptCifgParams

    std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToForgetWeights(
        TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToForgetWeightsData);

    std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToCellWeights(
        TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToCellWeightsData);

    std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToOutputWeights(
        TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToOutputWeightsData);

    std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToForgetWeights(
        TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToForgetWeightsData);

    std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToCellWeights(
        TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToCellWeightsData);

    std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToOutputWeights(
        TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToOutputWeightsData);

    std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor forgetGateBias(
        TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        forgetGateBiasData);

    std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
    ConstTensor cellBias(
        TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellBiasData);

    std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor outputGateBias(
        TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        outputGateBiasData);

    std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToInputWeights(
        TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToInputWeightsData);

    std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToInputWeights(
        TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToInputWeightsData);

    std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor inputGateBias(
        TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputGateBiasData);

    LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeights;
    params.m_InputToCellWeights = &inputToCellWeights;
    params.m_InputToOutputWeights = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias = &forgetGateBias;
    params.m_CellBias = &cellBias;
    params.m_OutputGateBias = &outputGateBias;

    params.m_InputToInputWeights = &inputToInputWeights;
    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
    params.m_InputGateBias = &inputGateBias;

    TestLstmLayerVisitor visitor(descriptor, params, layerName);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
    layer->ExecuteStrategy(visitor);
}

// TODO add one with peephole
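// Peephole connections add CellToForgetWeights and CellToOutputWeights to the parameters
// checked by the visitor.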
TEST_CASE("CheckLstmLayerPeephole")
{
    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc = 3;
    descriptor.m_ClippingThresProj = 0.5f;
    descriptor.m_ClippingThresCell = 0.3f;
    descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
    descriptor.m_PeepholeEnabled = true;

    std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToForgetWeights(
        TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToForgetWeightsData);

    std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToCellWeights(
        TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToCellWeightsData);

    std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToOutputWeights(
        TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToOutputWeightsData);

    std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToForgetWeights(
        TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToForgetWeightsData);

    std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToCellWeights(
        TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToCellWeightsData);

    std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToOutputWeights(
        TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToOutputWeightsData);

    std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor forgetGateBias(
        TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        forgetGateBiasData);

    std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
    ConstTensor cellBias(
        TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellBiasData);

    std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor outputGateBias(
        TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        outputGateBiasData);

    std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor cellToForgetWeights(
        TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellToForgetWeightsData);

    std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor cellToOutputWeights(
        TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellToOutputWeightsData);

    LstmInputParams params;
    params.m_InputToForgetWeights = &inputToForgetWeights;
    params.m_InputToCellWeights = &inputToCellWeights;
    params.m_InputToOutputWeights = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias = &forgetGateBias;
    params.m_CellBias = &cellBias;
    params.m_OutputGateBias = &outputGateBias;

    params.m_CellToForgetWeights = &cellToForgetWeights;
    params.m_CellToOutputWeights = &cellToOutputWeights;

    TestLstmLayerVisitor visitor(descriptor, params);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
    layer->ExecuteStrategy(visitor);
}

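// Peephole with CIFG disabled additionally needs CellToInputWeights on top of the CIFG
// (input gate) tensors.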
TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
{
    LstmDescriptor descriptor;
    descriptor.m_ActivationFunc = 3;
    descriptor.m_ClippingThresProj = 0.5f;
    descriptor.m_ClippingThresCell = 0.3f;
    descriptor.m_CifgEnabled = false;
    descriptor.m_PeepholeEnabled = true;

    std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToForgetWeights(
        TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToForgetWeightsData);

    std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToCellWeights(
        TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToCellWeightsData);

    std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToOutputWeights(
        TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToOutputWeightsData);

    std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToForgetWeights(
        TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToForgetWeightsData);

    std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToCellWeights(
        TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToCellWeightsData);

    std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToOutputWeights(
        TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToOutputWeightsData);

    std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor forgetGateBias(
        TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        forgetGateBiasData);

    std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
    ConstTensor cellBias(
        TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellBiasData);

    std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor outputGateBias(
        TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        outputGateBiasData);

    std::vector<float> cellToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor cellToInputWeights(
        TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellToInputWeightsData);

    std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
    ConstTensor cellToForgetWeights(
        TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellToForgetWeightsData);

    std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor cellToOutputWeights(
        TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        cellToOutputWeightsData);

    std::vector<float> inputToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor inputToInputWeights(
        TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputToInputWeightsData);

    std::vector<float> recurrentToInputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
    ConstTensor recurrentToInputWeights(
        TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
        recurrentToInputWeightsData);

    std::vector<float> inputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
    ConstTensor inputGateBias(
        TensorInfo(4, inputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
        inputGateBiasData);

    LstmInputParams params;
    // Basic params
    params.m_InputToForgetWeights = &inputToForgetWeights;
    params.m_InputToCellWeights = &inputToCellWeights;
    params.m_InputToOutputWeights = &inputToOutputWeights;
    params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
    params.m_RecurrentToCellWeights = &recurrentToCellWeights;
    params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
    params.m_ForgetGateBias = &forgetGateBias;
    params.m_CellBias = &cellBias;
    params.m_OutputGateBias = &outputGateBias;

    // Peephole params
    params.m_CellToInputWeights = &cellToInputWeights;
    params.m_CellToForgetWeights = &cellToForgetWeights;
    params.m_CellToOutputWeights = &cellToOutputWeights;

    // Cifg params
    params.m_InputToInputWeights = &inputToInputWeights;
    params.m_RecurrentToInputWeights = &recurrentToInputWeights;
    params.m_InputGateBias = &inputGateBias;

    TestLstmLayerVisitor visitor(descriptor, params);

    NetworkImpl net;

    IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
    layer->ExecuteStrategy(visitor);
}

1180 TEST_CASE("CheckNamedLstmLayerPeephole")
1181 {
1182  const char* layerName = "LstmLayer";
1183  LstmDescriptor descriptor;
1184  descriptor.m_ActivationFunc = 3;
1185  descriptor.m_ClippingThresProj = 0.5f;
1186  descriptor.m_ClippingThresCell = 0.3f;
1187  descriptor.m_CifgEnabled = true; // if this is true then we DON'T need to set the OptCifgParams
1188  descriptor.m_PeepholeEnabled = true;
1189 
1190  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1191  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1192  ConstTensor inputToForgetWeights(
1193  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1194  inputToForgetWeightsData);
1195 
1196  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1197  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1198  ConstTensor inputToCellWeights(
1199  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1200  inputToCellWeightsData);
1201 
1202  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1203  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1204  ConstTensor inputToOutputWeights(
1205  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1206  inputToOutputWeightsData);
1207 
1208  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1209  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1210  ConstTensor recurrentToForgetWeights(
1211  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1212  recurrentToForgetWeightsData);
1213 
1214  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1215  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1216  ConstTensor recurrentToCellWeights(
1217  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1218  recurrentToCellWeightsData);
1219 
1220  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1221  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1222  ConstTensor recurrentToOutputWeights(
1223  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1224  recurrentToOutputWeightsData);
1225 
1226  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1227  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1228  ConstTensor forgetGateBias(
1229  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1230  forgetGateBiasData);
1231 
1232  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1233  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1234  ConstTensor cellBias(
1235  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1236  cellBiasData);
1237 
1238  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1239  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1240  ConstTensor outputGateBias(
1241  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1242  outputGateBiasData);
1243 
1244  std::vector<float> cellToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1245  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1246  ConstTensor cellToForgetWeights(
1247  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1248  cellToForgetWeightsData);
1249 
1250  std::vector<float> cellToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1251  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1252  ConstTensor cellToOutputWeights(
1253  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1254  cellToOutputWeightsData);
1255 
1256  LstmInputParams params;
1257  params.m_InputToForgetWeights = &inputToForgetWeights;
1258  params.m_InputToCellWeights = &inputToCellWeights;
1259  params.m_InputToOutputWeights = &inputToOutputWeights;
1260  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1261  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1262  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1263  params.m_ForgetGateBias = &forgetGateBias;
1264  params.m_CellBias = &cellBias;
1265  params.m_OutputGateBias = &outputGateBias;
1266 
1267  params.m_CellToForgetWeights = &cellToForgetWeights;
1268  params.m_CellToOutputWeights = &cellToOutputWeights;
1269 
1270  TestLstmLayerVisitor visitor(descriptor, params, layerName);
1271 
1272  NetworkImpl net;
1273 
1274  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
1275  layer->ExecuteStrategy(visitor);
1276 }
1277 
1278 // Projection enabled tests
1279 TEST_CASE("CheckLstmLayerProjection")
1280 {
1281  LstmDescriptor descriptor;
1282  descriptor.m_ActivationFunc = 3;
1283  descriptor.m_ClippingThresProj = 0.5f;
1284  descriptor.m_ClippingThresCell = 0.3f;
1285  descriptor.m_CifgEnabled = true; // when CIFG is enabled the optional CIFG params do not need to be set
1286  descriptor.m_ProjectionEnabled = true;
1287 
1288  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1289  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1290  ConstTensor inputToForgetWeights(
1291  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1292  inputToForgetWeightsData);
1293 
1294  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1295  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1296  ConstTensor inputToCellWeights(
1297  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1298  inputToCellWeightsData);
1299 
1300  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1301  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1302  ConstTensor inputToOutputWeights(
1303  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1304  inputToOutputWeightsData);
1305 
1306  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1307  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1308  ConstTensor recurrentToForgetWeights(
1309  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1310  recurrentToForgetWeightsData);
1311 
1312  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1313  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1314  ConstTensor recurrentToCellWeights(
1315  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1316  recurrentToCellWeightsData);
1317 
1318  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1319  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1320  ConstTensor recurrentToOutputWeights(
1321  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1322  recurrentToOutputWeightsData);
1323 
1324  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1325  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1326  ConstTensor forgetGateBias(
1327  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1328  forgetGateBiasData);
1329 
1330  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1331  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1332  ConstTensor cellBias(
1333  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1334  cellBiasData);
1335 
1336  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1337  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1338  ConstTensor outputGateBias(
1339  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1340  outputGateBiasData);
1341 
1342  std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1343  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1344  ConstTensor projectionBias(
1345  TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1346  projectionBiasData);
1347 
1348  std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1349  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1350  ConstTensor projectionWeights(
1351  TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1352  projectionWeightsData);
1353 
1354  LstmInputParams params;
1355  params.m_InputToForgetWeights = &inputToForgetWeights;
1356  params.m_InputToCellWeights = &inputToCellWeights;
1357  params.m_InputToOutputWeights = &inputToOutputWeights;
1358  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1359  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1360  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1361  params.m_ForgetGateBias = &forgetGateBias;
1362  params.m_CellBias = &cellBias;
1363  params.m_OutputGateBias = &outputGateBias;
1364 
1365  params.m_ProjectionWeights = &projectionWeights;
1366  params.m_ProjectionBias = &projectionBias;
1367 
1368  TestLstmLayerVisitor visitor(descriptor, params);
1369 
1370  NetworkImpl net;
1371 
1372  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
1373  layer->ExecuteStrategy(visitor);
1374 }
1375 
1376 TEST_CASE("CheckNamedLstmLayerProjection")
1377 {
1378  const char* layerName = "LstmLayer";
1379  LstmDescriptor descriptor;
1380  descriptor.m_ActivationFunc = 3;
1381  descriptor.m_ClippingThresProj = 0.5f;
1382  descriptor.m_ClippingThresCell = 0.3f;
1383  descriptor.m_CifgEnabled = true; // when CIFG is enabled the optional CIFG params do not need to be set
1384  descriptor.m_ProjectionEnabled = true;
1385 
1386  std::vector<float> inputToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1387  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1388  ConstTensor inputToForgetWeights(
1389  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1390  inputToForgetWeightsData);
1391 
1392  std::vector<float> inputToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1393  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1394  ConstTensor inputToCellWeights(
1395  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1396  inputToCellWeightsData);
1397 
1398  std::vector<float> inputToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1399  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1400  ConstTensor inputToOutputWeights(
1401  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1402  inputToOutputWeightsData);
1403 
1404  std::vector<float> recurrentToForgetWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1405  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1406  ConstTensor recurrentToForgetWeights(
1407  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1408  recurrentToForgetWeightsData);
1409 
1410  std::vector<float> recurrentToCellWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1411  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1412  ConstTensor recurrentToCellWeights(
1413  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1414  recurrentToCellWeightsData);
1415 
1416  std::vector<float> recurrentToOutputWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1417  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1418  ConstTensor recurrentToOutputWeights(
1419  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1420  recurrentToOutputWeightsData);
1421 
1422  std::vector<float> forgetGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1423  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1424  ConstTensor forgetGateBias(
1425  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1426  forgetGateBiasData);
1427 
1428  std::vector<float> cellBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1429  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1430  ConstTensor cellBias(
1431  TensorInfo(4, cellBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1432  cellBiasData);
1433 
1434  std::vector<float> outputGateBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1435  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1436  ConstTensor outputGateBias(
1437  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1438  outputGateBiasData);
1439 
1440  std::vector<float> projectionBiasData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1441  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
1442  ConstTensor projectionBias(
1443  TensorInfo(4, projectionBiasDimensions.data(), DataType::Float32, 0.0f, 0, true),
1444  projectionBiasData);
1445 
1446  std::vector<float> projectionWeightsData = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
1447  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
1448  ConstTensor projectionWeights(
1449  TensorInfo(4, projectionWeightsDimensions.data(), DataType::Float32, 0.0f, 0, true),
1450  projectionWeightsData);
1451 
1452  LstmInputParams params;
1453  params.m_InputToForgetWeights = &inputToForgetWeights;
1454  params.m_InputToCellWeights = &inputToCellWeights;
1455  params.m_InputToOutputWeights = &inputToOutputWeights;
1456  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1457  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1458  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1459  params.m_ForgetGateBias = &forgetGateBias;
1460  params.m_CellBias = &cellBias;
1461  params.m_OutputGateBias = &outputGateBias;
1462 
1463  params.m_ProjectionWeights = &projectionWeights;
1464  params.m_ProjectionBias = &projectionBias;
1465 
1466  TestLstmLayerVisitor visitor(descriptor, params, layerName);
1467 
1468  NetworkImpl net;
1469 
1470  IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
1471  layer->ExecuteStrategy(visitor);
1472 }
1473 
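// The QLstm tests below use quantized data types: QSymmS8 for the gate weights and
// Signed32 for the gate biases, with each TensorInfo marked as constant (the trailing
// 'true'). A minimal sketch of the construction pattern they all share, shown here only
// as a comment and not part of the original tests:
//
//     std::vector<uint8_t> weightData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
//     std::vector<unsigned int> weightDims = {1, 1, 3, 3};
//     ConstTensor weight(
//         TensorInfo(4, weightDims.data(), DataType::QSymmS8, 0.0f, 0, true),
//         weightData);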
1474 TEST_CASE("CheckQLstmLayerBasic")
1475 {
1476  QLstmDescriptor descriptor;
1477  descriptor.m_ProjectionClip = 0.5f;
1478  descriptor.m_CellClip = 0.3f;
1479  descriptor.m_CifgEnabled = true;
1480 
1481  // Basic params ONLY
1482  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1483  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1484  ConstTensor inputToForgetWeights(
1485  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1486  inputToForgetWeightsData);
1487 
1488  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1489  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1490  ConstTensor inputToCellWeights(
1491  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1492  inputToCellWeightsData);
1493 
1494  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1495  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1496  ConstTensor inputToOutputWeights(
1497  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1498  inputToOutputWeightsData);
1499 
1500  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1501  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1502  ConstTensor recurrentToForgetWeights(
1503  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1504  recurrentToForgetWeightsData);
1505 
1506  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1507  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1508  ConstTensor recurrentToCellWeights(
1509  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1510  recurrentToCellWeightsData);
1511 
1512  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1513  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1514  ConstTensor recurrentToOutputWeights(
1515  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1516  recurrentToOutputWeightsData);
1517 
1518  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1519  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1520  ConstTensor forgetGateBias(
1521  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1522  forgetGateBiasData);
1523 
1524  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1525  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1526  ConstTensor cellBias(
1527  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1528  cellBiasData);
1529 
1530  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1531  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1532  ConstTensor outputGateBias(
1533  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1534  outputGateBiasData);
1535 
1536  LstmInputParams params;
1537  params.m_InputToForgetWeights = &inputToForgetWeights;
1538  params.m_InputToCellWeights = &inputToCellWeights;
1539  params.m_InputToOutputWeights = &inputToOutputWeights;
1540  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1541  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1542  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1543  params.m_ForgetGateBias = &forgetGateBias;
1544  params.m_CellBias = &cellBias;
1545  params.m_OutputGateBias = &outputGateBias;
1546 
1547  TestQLstmLayerVisitor visitor(descriptor, params);
1548 
1549  NetworkImpl net;
1550 
1551  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1552  layer->ExecuteStrategy(visitor);
1553 }
1554 
1555 TEST_CASE("CheckNamedQLstmLayerBasic")
1556 {
1557  const char* layerName = "QLstmLayer";
1558  QLstmDescriptor descriptor;
1559  descriptor.m_ProjectionClip = 0.5f;
1560  descriptor.m_CellClip = 0.3f;
1561  descriptor.m_CifgEnabled = true;
1562 
1563  // Basic params ONLY
1564  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1565  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1566  ConstTensor inputToForgetWeights(
1567  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1568  inputToForgetWeightsData);
1569 
1570  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1571  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1572  ConstTensor inputToCellWeights(
1573  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1574  inputToCellWeightsData);
1575 
1576  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1577  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1578  ConstTensor inputToOutputWeights(
1579  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1580  inputToOutputWeightsData);
1581 
1582  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1583  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1584  ConstTensor recurrentToForgetWeights(
1585  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1586  recurrentToForgetWeightsData);
1587 
1588  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1589  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1590  ConstTensor recurrentToCellWeights(
1591  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1592  recurrentToCellWeightsData);
1593 
1594  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1595  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1596  ConstTensor recurrentToOutputWeights(
1597  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1598  recurrentToOutputWeightsData);
1599 
1600  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1601  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1602  ConstTensor forgetGateBias(
1603  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1604  forgetGateBiasData);
1605 
1606  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1607  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1608  ConstTensor cellBias(
1609  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1610  cellBiasData);
1611 
1612  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1613  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1614  ConstTensor outputGateBias(
1615  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1616  outputGateBiasData);
1617 
1618  LstmInputParams params;
1619  params.m_InputToForgetWeights = &inputToForgetWeights;
1620  params.m_InputToCellWeights = &inputToCellWeights;
1621  params.m_InputToOutputWeights = &inputToOutputWeights;
1622  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1623  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1624  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1625  params.m_ForgetGateBias = &forgetGateBias;
1626  params.m_CellBias = &cellBias;
1627  params.m_OutputGateBias = &outputGateBias;
1628 
1629  TestQLstmLayerVisitor visitor(descriptor, params, layerName);
1630 
1631  NetworkImpl net;
1632 
1633  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
1634  layer->ExecuteStrategy(visitor);
1635 }
1636 
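// With m_CifgEnabled set to false, the input gate tensors must also be supplied:
// m_InputToInputWeights, m_RecurrentToInputWeights and m_InputGateBias.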
1637 TEST_CASE("CheckQLstmLayerCifgDisabled")
1638 {
1639  QLstmDescriptor descriptor;
1640  descriptor.m_ProjectionClip = 0.5f;
1641  descriptor.m_CellClip = 0.3f;
1642  descriptor.m_CifgEnabled = false;
1643 
1644  // Basic params
1645  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1646  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1647  ConstTensor inputToForgetWeights(
1648  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1649  inputToForgetWeightsData);
1650 
1651  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1652  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1653  ConstTensor inputToCellWeights(
1654  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1655  inputToCellWeightsData);
1656 
1657  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1658  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1659  ConstTensor inputToOutputWeights(
1660  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1661  inputToOutputWeightsData);
1662 
1663  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1664  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1665  ConstTensor recurrentToForgetWeights(
1666  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1667  recurrentToForgetWeightsData);
1668 
1669  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1670  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1671  ConstTensor recurrentToCellWeights(
1672  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1673  recurrentToCellWeightsData);
1674 
1675  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1676  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1677  ConstTensor recurrentToOutputWeights(
1678  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1679  recurrentToOutputWeightsData);
1680 
1681  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1682  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1683  ConstTensor forgetGateBias(
1684  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1685  forgetGateBiasData);
1686 
1687  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1688  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1689  ConstTensor cellBias(
1690  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1691  cellBiasData);
1692 
1693  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1694  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1695  ConstTensor outputGateBias(
1696  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1697  outputGateBiasData);
1698 
1699  // CIFG disabled params
1700  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1701  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1702  ConstTensor inputToInputWeights(
1703  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1704  inputToInputWeightsData);
1705 
1706  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1707  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1708  ConstTensor recurrentToInputWeights(
1709  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1710  recurrentToInputWeightsData);
1711 
1712  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1713  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1714  ConstTensor inputGateBias(
1715  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1716  inputGateBiasData);
1717 
1718  LstmInputParams params;
1719 
1720  // Basic params
1721  params.m_InputToForgetWeights = &inputToForgetWeights;
1722  params.m_InputToCellWeights = &inputToCellWeights;
1723  params.m_InputToOutputWeights = &inputToOutputWeights;
1724  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1725  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1726  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1727  params.m_ForgetGateBias = &forgetGateBias;
1728  params.m_CellBias = &cellBias;
1729  params.m_OutputGateBias = &outputGateBias;
1730 
1731  // CIFG disabled params
1732  params.m_InputToInputWeights = &inputToInputWeights;
1733  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1734  params.m_InputGateBias = &inputGateBias;
1735 
1736  TestQLstmLayerVisitor visitor(descriptor, params);
1737 
1738  NetworkImpl net;
1739 
1740  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1741  layer->ExecuteStrategy(visitor);
1742 }
1743 
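// The peephole weights in this test are QSymmS16; because CIFG is disabled, the
// cell-to-input weights are supplied alongside the cell-to-forget and cell-to-output weights.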
1744 TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
1745 {
1746  QLstmDescriptor descriptor;
1747  descriptor.m_ProjectionClip = 0.5f;
1748  descriptor.m_CellClip = 0.3f;
1749  descriptor.m_CifgEnabled = false;
1750  descriptor.m_PeepholeEnabled = true;
1751 
1752  // Basic params
1753  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1754  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1755  ConstTensor inputToForgetWeights(
1756  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1757  inputToForgetWeightsData);
1758 
1759  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1760  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1761  ConstTensor inputToCellWeights(
1762  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1763  inputToCellWeightsData);
1764 
1765  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1766  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1767  ConstTensor inputToOutputWeights(
1768  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1769  inputToOutputWeightsData);
1770 
1771  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1772  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1773  ConstTensor recurrentToForgetWeights(
1774  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1775  recurrentToForgetWeightsData);
1776 
1777  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1778  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1779  ConstTensor recurrentToCellWeights(
1780  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1781  recurrentToCellWeightsData);
1782 
1783  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1784  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1785  ConstTensor recurrentToOutputWeights(
1786  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1787  recurrentToOutputWeightsData);
1788 
1789  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1790  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1791  ConstTensor forgetGateBias(
1792  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1793  forgetGateBiasData);
1794 
1795  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1796  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1797  ConstTensor cellBias(
1798  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1799  cellBiasData);
1800 
1801  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1802  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1803  ConstTensor outputGateBias(
1804  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1805  outputGateBiasData);
1806 
1807  // CIFG disabled params
1808  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1809  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
1810  ConstTensor inputToInputWeights(
1811  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1812  inputToInputWeightsData);
1813 
1814  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1815  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
1816  ConstTensor recurrentToInputWeights(
1817  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1818  recurrentToInputWeightsData);
1819 
1820  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1821  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
1822  ConstTensor inputGateBias(
1823  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1824  inputGateBiasData);
1825 
1826  // Peephole enabled, CIFG disabled params
1827  std::vector<int16_t> cellToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1828  std::vector<unsigned int> cellToInputWeightsDimensions = {1, 1, 3, 3};
1829  ConstTensor cellToInputWeights(
1830  TensorInfo(4, cellToInputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1831  cellToInputWeightsData);
1832 
1833  std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1834  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1835  ConstTensor cellToForgetWeights(
1836  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1837  cellToForgetWeightsData);
1838 
1839  std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1840  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1841  ConstTensor cellToOutputWeights(
1842  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1843  cellToOutputWeightsData);
1844 
1845  LstmInputParams params;
1846 
1847  // Basic params
1848  params.m_InputToForgetWeights = &inputToForgetWeights;
1849  params.m_InputToCellWeights = &inputToCellWeights;
1850  params.m_InputToOutputWeights = &inputToOutputWeights;
1851  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1852  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1853  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1854  params.m_ForgetGateBias = &forgetGateBias;
1855  params.m_CellBias = &cellBias;
1856  params.m_OutputGateBias = &outputGateBias;
1857 
1858  // CIFG disabled params
1859  params.m_InputToInputWeights = &inputToInputWeights;
1860  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
1861  params.m_InputGateBias = &inputGateBias;
1862 
1863  // Peephole enabled, CIFG disabled params
1864  params.m_CellToInputWeights = &cellToInputWeights;
1865  params.m_CellToForgetWeights = &cellToForgetWeights;
1866  params.m_CellToOutputWeights = &cellToOutputWeights;
1867 
1868  TestQLstmLayerVisitor visitor(descriptor, params);
1869 
1870  NetworkImpl net;
1871 
1872  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1873  layer->ExecuteStrategy(visitor);
1874 }
1875 
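// With CIFG enabled only the cell-to-forget and cell-to-output peephole weights are
// passed; the cell-to-input weights are omitted.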
1876 TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
1877 {
1878  QLstmDescriptor descriptor;
1879  descriptor.m_ProjectionClip = 0.5f;
1880  descriptor.m_CellClip = 0.3f;
1881  descriptor.m_CifgEnabled = true;
1882  descriptor.m_PeepholeEnabled = true;
1883 
1884  // Basic params
1885  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1886  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1887  ConstTensor inputToForgetWeights(
1888  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1889  inputToForgetWeightsData);
1890 
1891  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1892  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1893  ConstTensor inputToCellWeights(
1894  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1895  inputToCellWeightsData);
1896 
1897  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1898  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
1899  ConstTensor inputToOutputWeights(
1900  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1901  inputToOutputWeightsData);
1902 
1903  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1904  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
1905  ConstTensor recurrentToForgetWeights(
1906  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1907  recurrentToForgetWeightsData);
1908 
1909  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1910  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
1911  ConstTensor recurrentToCellWeights(
1912  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1913  recurrentToCellWeightsData);
1914 
1915  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1916  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
1917  ConstTensor recurrentToOutputWeights(
1918  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1919  recurrentToOutputWeightsData);
1920 
1921  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1922  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
1923  ConstTensor forgetGateBias(
1924  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1925  forgetGateBiasData);
1926 
1927  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1928  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
1929  ConstTensor cellBias(
1930  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1931  cellBiasData);
1932 
1933  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1934  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
1935  ConstTensor outputGateBias(
1936  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
1937  outputGateBiasData);
1938 
1939  // Peephole enabled and CIFG enabled params
1940  std::vector<int16_t> cellToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1941  std::vector<unsigned int> cellToForgetWeightsDimensions = {1, 1, 3, 3};
1942  ConstTensor cellToForgetWeights(
1943  TensorInfo(4, cellToForgetWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1944  cellToForgetWeightsData);
1945 
1946  std::vector<int16_t> cellToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1947  std::vector<unsigned int> cellToOutputWeightsDimensions = {1, 1, 3, 3};
1948  ConstTensor cellToOutputWeights(
1949  TensorInfo(4, cellToOutputWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
1950  cellToOutputWeightsData);
1951 
1952  LstmInputParams params;
1953 
1954  // Basic params
1955  params.m_InputToForgetWeights = &inputToForgetWeights;
1956  params.m_InputToCellWeights = &inputToCellWeights;
1957  params.m_InputToOutputWeights = &inputToOutputWeights;
1958  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
1959  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
1960  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
1961  params.m_ForgetGateBias = &forgetGateBias;
1962  params.m_CellBias = &cellBias;
1963  params.m_OutputGateBias = &outputGateBias;
1964 
1965  // Peephole enabled and CIFG enabled params
1966  params.m_CellToForgetWeights = &cellToForgetWeights;
1967  params.m_CellToOutputWeights = &cellToOutputWeights;
1968 
1969  TestQLstmLayerVisitor visitor(descriptor, params);
1970 
1971  NetworkImpl net;
1972 
1973  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
1974  layer->ExecuteStrategy(visitor);
1975 }
1976 
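// Projection parameters for QLstm use QSymmS8 projection weights and a Signed32
// projection bias, matching the types used for the gate weights and biases.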
1977 TEST_CASE("CheckQLstmLayerProjectionEnabled")
1978 {
1979  QLstmDescriptor descriptor;
1980  descriptor.m_ProjectionClip = 0.5f;
1981  descriptor.m_CellClip = 0.3f;
1982  descriptor.m_CifgEnabled = true;
1983  descriptor.m_ProjectionEnabled = true;
1984 
1985  // Basic params ONLY
1986  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1987  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
1988  ConstTensor inputToForgetWeights(
1989  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1990  inputToForgetWeightsData);
1991 
1992  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1993  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
1994  ConstTensor inputToCellWeights(
1995  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
1996  inputToCellWeightsData);
1997 
1998  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
1999  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2000  ConstTensor inputToOutputWeights(
2001  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2002  inputToOutputWeightsData);
2003 
2004  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2005  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2006  ConstTensor recurrentToForgetWeights(
2007  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2008  recurrentToForgetWeightsData);
2009 
2010  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2011  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2012  ConstTensor recurrentToCellWeights(
2013  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2014  recurrentToCellWeightsData);
2015 
2016  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2017  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2018  ConstTensor recurrentToOutputWeights(
2019  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2020  recurrentToOutputWeightsData);
2021 
2022  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2023  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2024  ConstTensor forgetGateBias(
2025  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2026  forgetGateBiasData);
2027 
2028  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2029  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2030  ConstTensor cellBias(
2031  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2032  cellBiasData);
2033 
2034  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2035  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2036  ConstTensor outputGateBias(
2037  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2038  outputGateBiasData);
2039 
2040  // Projection enabled params
2041  std::vector<uint8_t> projectionWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2042  std::vector<unsigned int> projectionWeightsDimensions = {1, 1, 3, 3};
2043  ConstTensor projectionWeights(
2044  TensorInfo(4, projectionWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2045  projectionWeightsData);
2046 
2047  std::vector<int32_t> projectionBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2048  std::vector<unsigned int> projectionBiasDimensions = {1, 1, 3, 3};
2049  ConstTensor projectionBias(
2050  TensorInfo(4, projectionBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2051  projectionBiasData);
2052 
2053  LstmInputParams params;
2054 
2055  // Basic params
2056  params.m_InputToForgetWeights = &inputToForgetWeights;
2057  params.m_InputToCellWeights = &inputToCellWeights;
2058  params.m_InputToOutputWeights = &inputToOutputWeights;
2059  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2060  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2061  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2062  params.m_ForgetGateBias = &forgetGateBias;
2063  params.m_CellBias = &cellBias;
2064  params.m_OutputGateBias = &outputGateBias;
2065 
2066  // Projection enabled params
2067  params.m_ProjectionWeights = &projectionWeights;
2068  params.m_ProjectionBias = &projectionBias;
2069 
2070  TestQLstmLayerVisitor visitor(descriptor, params);
2071 
2072  NetworkImpl net;
2073 
2074  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
2075  layer->ExecuteStrategy(visitor);
2076 }
2077 
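// The layer normalisation weights are QSymmS16. With CIFG disabled, the input layer-norm
// weights are supplied in addition to the forget, cell and output layer-norm weights.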
2078 TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
2079 {
2080  QLstmDescriptor descriptor;
2081  descriptor.m_ProjectionClip = 0.5f;
2082  descriptor.m_CellClip = 0.3f;
2083  descriptor.m_CifgEnabled = false;
2084  descriptor.m_LayerNormEnabled = true;
2085 
2086  // Basic params
2087  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2088  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
2089  ConstTensor inputToForgetWeights(
2090  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2091  inputToForgetWeightsData);
2092 
2093  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2094  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
2095  ConstTensor inputToCellWeights(
2096  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2097  inputToCellWeightsData);
2098 
2099  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2100  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2101  ConstTensor inputToOutputWeights(
2102  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2103  inputToOutputWeightsData);
2104 
2105  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2106  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2107  ConstTensor recurrentToForgetWeights(
2108  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2109  recurrentToForgetWeightsData);
2110 
2111  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2112  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2113  ConstTensor recurrentToCellWeights(
2114  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2115  recurrentToCellWeightsData);
2116 
2117  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2118  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2119  ConstTensor recurrentToOutputWeights(
2120  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2121  recurrentToOutputWeightsData);
2122 
2123  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2124  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2125  ConstTensor forgetGateBias(
2126  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2127  forgetGateBiasData);
2128 
2129  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2130  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2131  ConstTensor cellBias(
2132  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2133  cellBiasData);
2134 
2135  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2136  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2137  ConstTensor outputGateBias(
2138  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2139  outputGateBiasData);
2140 
2141  // CIFG disabled params
2142  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2143  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
2144  ConstTensor inputToInputWeights(
2145  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2146  inputToInputWeightsData);
2147 
2148  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2149  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
2150  ConstTensor recurrentToInputWeights(
2151  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2152  recurrentToInputWeightsData);
2153 
2154  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2155  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
2156  ConstTensor inputGateBias(
2157  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2158  inputGateBiasData);
2159 
2160  // Layer Norm enabled, CIFG disabled params
2161  std::vector<int16_t> inputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2162  std::vector<unsigned int> inputLayerNormWeightsDimensions = {1, 1, 3, 3};
2163  ConstTensor inputLayerNormWeights(
2164  TensorInfo(4, inputLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2165  inputLayerNormWeightsData);
2166 
2167  std::vector<int16_t> forgetLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2168  std::vector<unsigned int> forgetLayerNormWeightsDimensions = {1, 1, 3, 3};
2169  ConstTensor forgetLayerNormWeights(
2170  TensorInfo(4, forgetLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2171  forgetLayerNormWeightsData);
2172 
2173  std::vector<int16_t> cellLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2174  std::vector<unsigned int> cellLayerNormWeightsDimensions = {1, 1, 3, 3};
2175  ConstTensor cellLayerNormWeights(
2176  TensorInfo(4, cellLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2177  cellLayerNormWeightsData);
2178 
2179  std::vector<int16_t> outputLayerNormWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2180  std::vector<unsigned int> outputLayerNormWeightsDimensions = {1, 1, 3, 3};
2181  ConstTensor outputLayerNormWeights(
2182  TensorInfo(4, outputLayerNormWeightsDimensions.data(), DataType::QSymmS16, 0.0f, 0, true),
2183  outputLayerNormWeightsData);
2184 
2185  LstmInputParams params;
2186 
2187  // Basic params
2188  params.m_InputToForgetWeights = &inputToForgetWeights;
2189  params.m_InputToCellWeights = &inputToCellWeights;
2190  params.m_InputToOutputWeights = &inputToOutputWeights;
2191  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2192  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2193  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2194  params.m_ForgetGateBias = &forgetGateBias;
2195  params.m_CellBias = &cellBias;
2196  params.m_OutputGateBias = &outputGateBias;
2197 
2198  // CIFG disabled params
2199  params.m_InputToInputWeights = &inputToInputWeights;
2200  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
2201  params.m_InputGateBias = &inputGateBias;
2202 
2203  // Layer Norm enabled, CIFG disabled params
2204  params.m_InputLayerNormWeights = &inputLayerNormWeights;
2205  params.m_ForgetLayerNormWeights = &forgetLayerNormWeights;
2206  params.m_CellLayerNormWeights = &cellLayerNormWeights;
2207  params.m_OutputLayerNormWeights = &outputLayerNormWeights;
2208 
2209  TestQLstmLayerVisitor visitor(descriptor, params);
2210 
2211  NetworkImpl net;
2212 
2213  IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
2214  layer->ExecuteStrategy(visitor);
2215 }
2216 
2217 
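// The QuantizedLstm tests supply the full set of twelve tensors (four input weights,
// four recurrent weights and four gate biases); QuantizedLstmInputParams has no optional
// CIFG, peephole or projection parameters.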
2218 TEST_CASE("CheckQuantizedLstmLayer")
2219 {
2220  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2221  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
2222  ConstTensor inputToInputWeights(
2223  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2224  inputToInputWeightsData);
2225 
2226  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2227  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
2228  ConstTensor inputToForgetWeights(
2229  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2230  inputToForgetWeightsData);
2231 
2232  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2233  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
2234  ConstTensor inputToCellWeights(
2235  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2236  inputToCellWeightsData);
2237 
2238  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2239  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2240  ConstTensor inputToOutputWeights(
2241  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2242  inputToOutputWeightsData);
2243 
2244 
2245  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2246  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
2247  ConstTensor recurrentToInputWeights(
2248  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2249  recurrentToInputWeightsData);
2250 
2251  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2252  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2253  ConstTensor recurrentToForgetWeights(
2254  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2255  recurrentToForgetWeightsData);
2256 
2257  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2258  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2259  ConstTensor recurrentToCellWeights(
2260  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2261  recurrentToCellWeightsData);
2262 
2263  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2264  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2265  ConstTensor recurrentToOutputWeights(
2266  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QSymmS8, 0.0f, 0, true),
2267  recurrentToOutputWeightsData);
2268 
2269 
2270  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2271  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
2272  ConstTensor inputGateBias(
2273  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2274  inputGateBiasData);
2275 
2276  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2277  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2278  ConstTensor forgetGateBias(
2279  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2280  forgetGateBiasData);
2281 
2282  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2283  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2284  ConstTensor cellBias(
2285  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2286  cellBiasData);
2287 
2288  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2289  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2290  ConstTensor outputGateBias(
2291  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2292  outputGateBiasData);
2293 
2294  QuantizedLstmInputParams params;
2295 
2296  params.m_InputToInputWeights = &inputToInputWeights;
2297  params.m_InputToForgetWeights = &inputToForgetWeights;
2298  params.m_InputToCellWeights = &inputToCellWeights;
2299  params.m_InputToOutputWeights = &inputToOutputWeights;
2300 
2301  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
2302  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2303  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2304  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2305 
2306  params.m_InputGateBias = &inputGateBias;
2307  params.m_ForgetGateBias = &forgetGateBias;
2308  params.m_CellBias = &cellBias;
2309  params.m_OutputGateBias = &outputGateBias;
2310 
2311  TestQuantizedLstmLayerVisitor visitor(params);
2312 
2313  NetworkImpl net;
2314 
2315  IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
2316  layer->ExecuteStrategy(visitor);
2317 }
2318 
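// This named variant uses QAsymmU8 input and recurrent weights, whereas the unnamed test
// above uses QSymmS8; the gate biases are Signed32 in both cases.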
2319 TEST_CASE("CheckNamedQuantizedLstmLayer")
2320 {
2321  const char* layerName = "LstmLayer";
2322  std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2323  std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
2324  ConstTensor inputToInputWeights(
2325  TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2326  inputToInputWeightsData);
2327 
2328  std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2329  std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
2330  ConstTensor inputToForgetWeights(
2331  TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2332  inputToForgetWeightsData);
2333 
2334  std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2335  std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
2336  ConstTensor inputToCellWeights(
2337  TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2338  inputToCellWeightsData);
2339 
2340  std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2341  std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
2342  ConstTensor inputToOutputWeights(
2343  TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2344  inputToOutputWeightsData);
2345 
2346 
2347  std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2348  std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
2349  ConstTensor recurrentToInputWeights(
2350  TensorInfo(4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2351  recurrentToInputWeightsData);
2352 
2353  std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2354  std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
2355  ConstTensor recurrentToForgetWeights(
2356  TensorInfo(4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2357  recurrentToForgetWeightsData);
2358 
2359  std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2360  std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
2361  ConstTensor recurrentToCellWeights(
2362  TensorInfo(4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2363  recurrentToCellWeightsData);
2364 
2365  std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2366  std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
2367  ConstTensor recurrentToOutputWeights(
2368  TensorInfo(4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8, 0.0f, 0, true),
2369  recurrentToOutputWeightsData);
2370 
2371 
2372  std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2373  std::vector<unsigned int> inputGateBiasDimensions = {1, 1, 3, 3};
2374  ConstTensor inputGateBias(
2375  TensorInfo(4, inputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2376  inputGateBiasData);
2377 
2378  std::vector<int32_t> forgetGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2379  std::vector<unsigned int> forgetGateBiasDimensions = {1, 1, 3, 3};
2380  ConstTensor forgetGateBias(
2381  TensorInfo(4, forgetGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2382  forgetGateBiasData);
2383 
2384  std::vector<int32_t> cellBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2385  std::vector<unsigned int> cellBiasDimensions = {1, 1, 3, 3};
2386  ConstTensor cellBias(
2387  TensorInfo(4, cellBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2388  cellBiasData);
2389 
2390  std::vector<int32_t> outputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
2391  std::vector<unsigned int> outputGateBiasDimensions = {1, 1, 3, 3};
2392  ConstTensor outputGateBias(
2393  TensorInfo(4, outputGateBiasDimensions.data(), DataType::Signed32, 0.0f, 0, true),
2394  outputGateBiasData);
2395 
2396  QuantizedLstmInputParams params;
2397 
2398  params.m_InputToInputWeights = &inputToInputWeights;
2399  params.m_InputToForgetWeights = &inputToForgetWeights;
2400  params.m_InputToCellWeights = &inputToCellWeights;
2401  params.m_InputToOutputWeights = &inputToOutputWeights;
2402 
2403  params.m_RecurrentToInputWeights = &recurrentToInputWeights;
2404  params.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
2405  params.m_RecurrentToCellWeights = &recurrentToCellWeights;
2406  params.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
2407 
2408  params.m_InputGateBias = &inputGateBias;
2409  params.m_ForgetGateBias = &forgetGateBias;
2410  params.m_CellBias = &cellBias;
2411  params.m_OutputGateBias = &outputGateBias;
2412 
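     // The visitor compares the parameters and layer name reported via ExecuteStrategy
     // against the expected values captured at construction.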
2413  TestQuantizedLstmLayerVisitor visitor(params, layerName);
2414 
2415  NetworkImpl net;
2416 
2417  IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
2418  layer->ExecuteStrategy(visitor);
2419 }
2420 
2421 }
2422 
2423 } // namespace armnn
TEST_SUITE("TestConstTensorLayerVisitor")
void CheckDescriptor(const BatchNormalizationDescriptor &descriptor)
void CheckDescriptor(const QLstmDescriptor &descriptor)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
bool m_BiasEnabled
Enable/disable bias.
const ConstTensor * m_RecurrentToOutputWeights
uint32_t m_PadBottom
Padding bottom value in the height dimension.
float m_ClippingThresProj
Clipping threshold value for the projection.
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
const ConstTensor * m_RecurrentToForgetWeights
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
void CheckDescriptor(const FullyConnectedDescriptor &descriptor)
bool m_PeepholeEnabled
Enable/disable peephole.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
IConnectableLayer * AddQuantizedLstmLayer(const QuantizedLstmInputParams &params, const char *name=nullptr)
Definition: Network.cpp:2540
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
IConnectableLayer * AddConstantLayer(const ConstTensor &input, const char *name=nullptr)
Definition: Network.cpp:2264
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
IConnectableLayer * AddConvolution2dLayer(const Convolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Definition: Network.cpp:2030
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
IConnectableLayer * AddDepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor, const char *name=nullptr)
Definition: Network.cpp:2082
uint32_t m_PadRight
Padding right value in the width dimension.
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
Copyright (c) 2021 ARM Limited and Contributors.
Private implementation of INetwork.
Definition: Network.hpp:31
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
IConnectableLayer * AddQLstmLayer(const QLstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Definition: Network.cpp:2578
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
bool m_LayerNormEnabled
Enable/disable layer normalization.
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
An LstmDescriptor for the LstmLayer.
uint32_t m_PadTop
Padding top value in the height dimension.
void CheckDescriptor(const LstmDescriptor &descriptor)
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
const ConstTensor * m_InputToForgetWeights
float m_ProjectionClip
Clipping threshold value for the projection.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
bool m_PeepholeEnabled
Enable/disable peephole.
void CheckDescriptor(const Convolution2dDescriptor &convolution2dDescriptor)
A QLstmDescriptor for the QLstmLayer.
uint32_t m_ActivationFunc
The activation function to use.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CheckDescriptor(const DepthwiseConvolution2dDescriptor &convolution2dDescriptor)
const ConstTensor * m_RecurrentToInputWeights
float m_ClippingThresCell
Clipping threshold value for the cell state.
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
float m_CellClip
Clipping threshold value for the cell state.
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
void CheckConstTensorPtrs(const std::string &name, const ConstTensor *expected, const ConstTensor *actual)
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
IConnectableLayer * AddFullyConnectedLayer(const FullyConnectedDescriptor &fullyConnectedDescriptor, const char *name=nullptr)
Definition: Network.cpp:1942
bool m_ProjectionEnabled
Enable/disable the projection layer.
void CheckInputParameters(const QuantizedLstmInputParams &params)
const ConstTensor * m_RecurrentToCellWeights
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
const ConstTensor * m_InputToOutputWeights
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
IConnectableLayer * AddBatchNormalizationLayer(const BatchNormalizationDescriptor &desc, const ConstTensor &mean, const ConstTensor &variance, const ConstTensor &beta, const ConstTensor &gamma, const char *name=nullptr)
Definition: Network.cpp:2208
virtual int Connect(IInputSlot &destination)=0
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
virtual ARMNN_NO_DEPRECATE_WARN_END void ExecuteStrategy(IStrategy &strategy) const =0
Apply a visitor to this layer.
IConnectableLayer * AddLstmLayer(const LstmDescriptor &descriptor, const LstmInputParams &params, const char *name=nullptr)
Definition: Network.cpp:2296
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
uint32_t m_PadRight
Padding right value in the width dimension.
bool m_ConstantWeights
Enable/disable constant weights and biases.
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40