ArmNN 22.02
UnidirectionalSequenceLstmTestImpl.cpp
1 //
2 // Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //

#include "UnidirectionalSequenceLstmTestImpl.hpp"

// Assumed helper includes (tensor-copy and workload test utilities used by the tests below).
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

15 #include <ResolveType.hpp>
16 
17 namespace {
18 
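// Builds and runs a UnidirectionalSequenceLstm workload with batch-major ([batch, time, channels])
// input, zero-initialised cell/output state and a fixed set of float constant weights, then
// compares the produced output sequence against the caller-supplied expected values.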
19 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
20 LayerTestResult<T, 3> UnidirectionalSequenceLstmLayerFloat32TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
24  const std::vector<T>& input,
25  const std::vector<T>& outputExpected,
26  const armnn::TensorShape& inputShape,
27  const armnn::TensorShape& outputExpectedShape,
28  float qScale = 0.0f,
29  int32_t qOffset = 0,
30  armnn::DataType constantDataType = armnn::DataType::Float32) {
31  IgnoreUnused(memoryManager);
32  unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
33  unsigned int timeSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
34  unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[2]);
35  unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
36  unsigned numUnits = outputSize;
37 
38  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, ArmnnType, qScale, qOffset);
39  armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
40  armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
41 
42  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, ArmnnType, qScale, qOffset);
43 
44  std::vector<T> inputVector;
45  inputVector.assign(input.data(), input.data() + (batchSize * timeSize * inputSize));
46 
47  std::vector<T> cellStateInVector(batchSize * numUnits, T());
48  std::vector<T> outputStateInVector(batchSize * outputSize, T());
49 
50  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
51 
52  std::vector<T> outputVector;
53  outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * timeSize * outputSize));
54 
55  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
56  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
57  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
58  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
59  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
60 
61  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
62 
    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

66  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
67  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
68  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
69 
70  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
71 
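    // Constant tensor shapes for this test: biases are {numUnits}, input weights are
    // {numUnits, inputSize = 3} and recurrent weights are {numUnits, outputSize = 4}.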
72  armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
73  armnn::TensorInfo tensorInfo12({numUnits, 3}, constantDataType, qScale, qOffset);
74  armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);
75 
76  std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
77  -0.117484632f, 0.3298470976f, -0.1179017122f,
78  0.214305695f, 0.42135173085f, 0.003878414626f,
79  -0.348303917f, -0.1881275477f, 0.0343011027f };
80 
81  std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
82  -0.3810434485f, 0.268383264f, -0.009807467424f,
83  -0.3522925403f, -0.24275735512f, -0.28344226125f,
84  0.13512269116f, -0.4932442977f, -0.10039821991f };
85 
86  std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
87  0.386399507f, -0.259465157985f, -0.16545993089f,
88  -0.4230232555f, 0.341664791103f, -0.18127849691f,
89  -0.2277662414f, -0.55275535589f, 0.34184026718f };
90 
91  std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
92  0.53969591851f, 0.23393625035f, -0.27140527306f,
93  0.50009280443f, 0.07511717046f, 0.3998299249f,
94  -0.51717478049f, 0.1889653282f, -0.367323637f };
95 
96  std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
97  -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
98  0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
99  0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f };
100 
101  std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
102  -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
103  -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
104  -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
105 
106  std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
107  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
108  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
109  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
110 
111  std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
112  -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
113  0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
114  -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
115 
116  std::vector<float> inputGateBias = { 0., 0., 0., 0. };
117 
118  std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
119 
120  std::vector<float> cellBias = { 0., 0., 0., 0. };
121 
122  std::vector<float> outputGateBias = { 0., 0., 0., 0. };
123 
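    // Wrap each constant in a ScopedTensorHandle and copy the host data into it; the queue
    // descriptor below only stores raw pointers, so these handles must outlive workload execution.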
124  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo12);
125  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo12);
126  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo12);
127  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo12);
128  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
129  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
130  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
131  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
132  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
133  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
134  armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
135  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
136 
137  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
138  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
139  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
140  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
141  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
142  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
143  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
144  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
145  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
146  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
147  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
148  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
149 
150  data.m_InputToInputWeights = &inputToInputWeightsTensor;
151  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
152  data.m_InputToCellWeights = &inputToCellWeightsTensor;
153  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
154  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
155  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
156  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
157  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
158  data.m_InputGateBias = &inputGateBiasTensor;
159  data.m_ForgetGateBias = &forgetGateBiasTensor;
160  data.m_CellBias = &cellBiasTensor;
161  data.m_OutputGateBias = &outputGateBiasTensor;
162 
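    // CIFG couples the input and forget gates, peephole adds cell-to-gate weights and projection
    // adds an output projection layer; all three are disabled for this basic float test.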
    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;    // tanh, as in the other tests in this file
    data.m_Parameters.m_CifgEnabled = false;
168  data.m_Parameters.m_PeepholeEnabled = false;
169  data.m_Parameters.m_ProjectionEnabled = false;
170  data.m_Parameters.m_TimeMajor = false;
171 
172  std::unique_ptr<armnn::IWorkload> workload
173  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
174  inputHandle->Allocate();
175  outputStateInHandle->Allocate();
176  cellStateInHandle->Allocate();
177 
178  outputHandle->Allocate();
179 
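    // Copy the input and the zero-initialised state tensors into the backend handles, run the
    // workload once, then read back the produced output sequence.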
180  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
181  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
182  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
183 
184  workload->Execute();
185 
186  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
187 
188  return LayerTestResult<T, 3>(actualOutput,
189  outputVector,
190  outputHandle->GetShape(),
191  outputTensorInfo.GetShape());
192 }
193 
194 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3>
UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
200  const std::vector<T>& input,
201  const std::vector<T>& outputExpected,
202  const armnn::TensorShape& inputShape,
203  const armnn::TensorShape& outputExpectedShape,
204  float qScale = 0.0f,
205  int32_t qOffset = 0,
206  armnn::DataType constantDataType = armnn::DataType::Float32) {
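    // Time-major variant: input and output are laid out as [timeSize, batchSize, channels], so the
    // sequence length is read from dimension 0 and the batch size from dimension 1.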
207  IgnoreUnused(memoryManager);
208  unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
209  unsigned int timeSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
210  unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[2]);
211  unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
212  unsigned numUnits = outputSize;
213 
214  armnn::TensorInfo inputTensorInfo({timeSize, batchSize, inputSize}, ArmnnType, qScale, qOffset);
215  armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
216  armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
217 
218  armnn::TensorInfo outputTensorInfo({timeSize, batchSize, outputSize}, ArmnnType, qScale, qOffset);
219 
220  std::vector<T> inputVector;
221  inputVector.assign(input.data(), input.data() + (batchSize * timeSize * inputSize));
222 
223  std::vector<T> cellStateInVector(batchSize * numUnits, T());
224  std::vector<T> outputStateInVector(batchSize * outputSize, T());
225 
226  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
227 
228  std::vector<T> outputVector;
229  outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * timeSize * outputSize));
230 
231  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
232  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
233  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
234  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
235  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
236 
237  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
238 
    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;
241 
242  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
243  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
244  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
245 
246  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
247 
248  armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
249  armnn::TensorInfo tensorInfo12({numUnits, 3}, constantDataType, qScale, qOffset);
250  armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);
251 
252  std::vector<float> inputToInputWeights = { 0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
253  0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
254  0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
255  -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f };
256 
257  std::vector<float> inputToForgetWeights = { -0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
258  -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
259  -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
260  -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f };
261 
262  std::vector<float> inputToCellWeights = { -0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
263  0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
264  0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
265  -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f };
266 
267  std::vector<float> inputToOutputWeights = { -0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
268  -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
269  0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
270  -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f };
271 
272  std::vector<float> recurrentToInputWeights = { 0.23788475990f, -0.24948765337f, 0.50044941902f, 0.14431896805f,
273  -0.115940228137f, -0.717082679f, -0.17208620906f, 0.17850610617f,
274  -0.16702319684f, -0.11384502053f, -0.309785276245f, -0.3316611672f,
275  0.52380162477f, -0.06839632987f, -0.391478359627f, -0.10756178963f };
276 
277  std::vector<float> recurrentToForgetWeights = { 0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
278  0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
279  -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
280  0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f };
281 
282  std::vector<float> recurrentToCellWeights = { 0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
283  -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
284  -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
285  -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f };
286 
287  std::vector<float> recurrentToOutputWeights = { -0.079031050201f, 0.041414566286f, -0.583727357285f, 0.1025384515f,
288  -0.172372072937f, 0.09214124082f, 0.178184121827f, -0.2439443916f,
289  0.104485116899f, 0.2600405514f, 0.064414866268f, 0.24141204357f,
290  0.281875759363f, -0.14234502664f, 0.15126448862f, -0.24421440064f };
291 
292  std::vector<float> inputGateBias = { 0., 0., 0., 0. };
293 
294  std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
295 
296  std::vector<float> cellBias = { 0., 0., 0., 0. };
297 
298  std::vector<float> outputGateBias = { 0., 0., 0., 0. };
299 
300  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo12);
301  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo12);
302  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo12);
303  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo12);
304  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
305  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
306  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
307  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
308  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
309  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
310  armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
311  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
312 
313  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
314  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
315  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
316  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
317  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
318  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
319  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
320  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
321  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
322  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
323  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
324  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
325 
326  data.m_InputToInputWeights = &inputToInputWeightsTensor;
327  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
328  data.m_InputToCellWeights = &inputToCellWeightsTensor;
329  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
330  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
331  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
332  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
333  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
334  data.m_InputGateBias = &inputGateBiasTensor;
335  data.m_ForgetGateBias = &forgetGateBiasTensor;
336  data.m_CellBias = &cellBiasTensor;
337  data.m_OutputGateBias = &outputGateBiasTensor;
338 
    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;    // tanh, as in the other tests in this file
    data.m_Parameters.m_CifgEnabled = false;
344  data.m_Parameters.m_PeepholeEnabled = false;
345  data.m_Parameters.m_ProjectionEnabled = false;
346  data.m_Parameters.m_TimeMajor = true;
347 
348  std::unique_ptr<armnn::IWorkload> workload
349  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
350  inputHandle->Allocate();
351  outputStateInHandle->Allocate();
352  cellStateInHandle->Allocate();
353 
354  outputHandle->Allocate();
355 
356  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
357  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
358  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
359 
360  workload->Execute();
361 
362  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
363 
364  return LayerTestResult<T, 3>(actualOutput,
365  outputVector,
366  outputHandle->GetShape(),
367  outputTensorInfo.GetShape());
368 }
369 
370 } // anonymous namespace
371 
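// Exported test entry points: the first two build their input/expected data and delegate to the
// shared float implementations above, while the remaining tests construct their workloads directly.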
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory) {
376  armnn::TensorInfo inputInfo({3, 2, 3}, armnn::DataType::Float32);
377  std::vector<float> input = { 1., 2., 3., 4., 5., 4.,
378  3., 2., 1., 2., 3., 4.,
379  5., 4., 3., 2., 1., 2. };
380 
381  armnn::TensorInfo outputInfo({3, 2, 4}, armnn::DataType::Float32);
382  std::vector<float> expectedOutput = { -0.07149004f, -0.1621171f, -0.17516759f, -0.0232934225f,
383  -0.16810727f, -0.41412935f, -0.5498753f, -0.00803578f,
384  -0.06687349f, 0.204077631f, -0.4276504f, -0.03123213f,
385  -0.12000261f, -0.0941918f, -0.45639035f, -0.02870186f,
386  -0.03429216f, 0.20824050f, -0.6569892f, -0.004152651f,
387  -0.10493034f, 0.14210969f, -0.58347696f, -0.03297536f };
388  return UnidirectionalSequenceLstmLayerFloat32TestImpl<armnn::DataType::Float32>(
389  workloadFactory, memoryManager, tensorHandleFactory,
390  input, expectedOutput, inputInfo.GetShape(), outputInfo.GetShape());
391 }
392 
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32TimeMajorTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory) {
397  armnn::TensorInfo inputInfo({2, 3, 3}, armnn::DataType::Float32);
398  std::vector<float> input = { 1., 2., 3., 4., 5., 4.,
399  3., 2., 1., 2., 3., 4.,
400  5., 4., 3., 2., 1., 2. };
401 
402  armnn::TensorInfo outputInfo({2, 3, 4}, armnn::DataType::Float32);
403  std::vector<float> expectedOutput = { 0.135657698f, 0.124672532f, 0.0212090332f, -0.0530203655f,
404  0.106138252f, 0.0404792242f, 0.0151643595f, -0.00675163185f,
405  -0.0128514022f, 0.0644884035f, 0.0709072053f, -0.0454045124f,
406  0.16288602f, 0.16649379f, 0.02770456f, -0.03698075f,
407  0.11171641f, 0.043119f , 0.0762981f , -0.01228541f,
408  0.10439701f, 0.21439962f, 0.11919238f, -0.08390583f };
409  return UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl<armnn::DataType::Float32>(
410  workloadFactory, memoryManager, tensorHandleFactory,
411  input, expectedOutput, inputInfo.GetShape(), outputInfo.GetShape());
412 }
413 
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
418 {
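    // Covers the full-feature float path: separate input gate (no CIFG), peephole connections
    // (cell-to-gate weights) and an output projection, so numUnits (6) differs from outputSize (5).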
419  IgnoreUnused(memoryManager);
420  unsigned int batchSize = 2;
421  unsigned int timeSize = 3;
422  unsigned int outputSize = 5;
423  unsigned int inputSize = 4;
424  unsigned numUnits = 6;
425 
426  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
427  armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
428  armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
429  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
430 
431  const std::vector<float> inputVector = { 1., 2., 3., 4., 5., 4.,
432  3., 2., 1., 2., 3., 4.,
433  5., 4., 3., 2., 1., 2.,
434  1., 2., 3., 4., 5., 4.};
435 
436  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
437  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
438 
439  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
440 
441  const std::vector<float> expectedOutput = { -0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
442  -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
443  -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
444  0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
445  -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
446  -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0127171f };
447 
448  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
449  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
450  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
451  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
452  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
453  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
454 
    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;
457 
458  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
459  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
460  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
461  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
462 
463  armnn::TensorInfo tensorInfo5({outputSize}, armnn::DataType::Float32);
464  armnn::TensorInfo tensorInfo6({numUnits}, armnn::DataType::Float32);
465  armnn::TensorInfo tensorInfo6x4({numUnits, inputSize}, armnn::DataType::Float32);
466  armnn::TensorInfo tensorInfo6x5({numUnits, outputSize}, armnn::DataType::Float32);
467  armnn::TensorInfo tensorInfo5x6({outputSize, numUnits}, armnn::DataType::Float32);
468 
469  std::vector<float> inputToInputWeights = { 0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
470  -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
471  -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
472  -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
473  -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
474  -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f };
475 
476  std::vector<float> inputToForgetWeights = { -0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
477  0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
478  0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
479  -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
480  -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
481  0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
482 
483  std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
484  -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
485  -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
486  -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
487  -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
488  0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f };
489 
490  std::vector<float> inputToOutputWeights = { -0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
491  -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
492  -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
493  0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
494  0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
495  -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f };
496 
497  std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f,
498  0.10380666f, 0.053110216f, -0.06928846f };
499 
500  std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.03032477f,
501  0.23027696f, 0.11098921f, 0.08989442f };
502 
503  std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f,
504  0.033463873f, -0.1483596f, 0.029460307f };
505 
506  std::vector<float> outputGateBias = { 0.046159424f, -0.0012809046f, 0.03563469f,
507  0.12648113f, 0.027195795f, 0.35373217f };
508 
509  std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
510  -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
511  -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
512  -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
513  0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
514  0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
515  -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
516  0.14283475f, -0.07390571f };
517 
518  std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
519  0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
520  0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
521  -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
522  0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
523  0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
524  -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
525  -0.019443132f, -0.030755889f };
526 
527  std::vector<float> recurrentToForgetWeights = { -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
528  0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
529  -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
530  0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
531  0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
532  -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
533  -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
534  0.061878487f, -0.04729229f };
535 
536  std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,
537  -0.045984812f,-0.01255415f, -0.0026479573f,
538  -0.08196161f, -0.054914974f, -0.0046604523f,
539  -0.029587349f, -0.044576716f, -0.07480124f,
540  -0.082868785f, 0.023254942f, 0.027502948f,
541  -0.0039728214f, -0.08683098f, -0.08116779f,
542  -0.014675607f, -0.037924774f, -0.023314456f,
543  -0.007401714f, -0.09255757f, 0.029460307f,
544  -0.08829125f, -0.005139627f, -0.08989442f,
545  -0.0555066f, 0.13596267f, 0.025062224f };
546 
547  std::vector<float> cellToInputWeights = { 0.040369894f, 0.030746894f, 0.24704495f,
548  0.018586371f, -0.037586458f, -0.15312155f };
549 
550  std::vector<float> cellToForgetWeights = { -0.01998659f, -0.15568835f, -0.24248174f,
551  -0.012770197f, 0.041331276f, -0.072311886f };
552 
553  std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f,
554  0.002913762f, 0.17764764f, -0.5495371f };
555 
556  std::vector<float> projectionWeights = { -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
557  0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
558  -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
559  -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
560  0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
561  0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f };
562 
563  std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
564 
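    // The projection weights ({outputSize, numUnits}) map the numUnits-wide cell output back down
    // to outputSize; the projection bias is left at zero.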
565  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo6x4);
566  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo6x4);
567  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo6x4);
568  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo6x4);
569  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo6x5);
570  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo6x5);
571  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo6x5);
572  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo6x5);
573  armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo6);
574  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo6);
575  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo6);
576  armnn::ScopedTensorHandle cellBiasTensor(tensorInfo6);
577  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo6);
578  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo6);
579  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo6);
580  armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo5x6);
581  armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo5);
582 
583  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
584  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
585  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
586  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
587  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
588  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
589  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
590  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
591  AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
592  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
593  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
594  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
595  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
596  AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
597  AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
598  AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
599  AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
600 
601  data.m_InputToInputWeights = &inputToInputWeightsTensor;
602  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
603  data.m_InputToCellWeights = &inputToCellWeightsTensor;
604  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
605  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
606  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
607  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
608  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
609  data.m_CellToInputWeights = &cellToInputWeightsTensor;
610  data.m_InputGateBias = &inputGateBiasTensor;
611  data.m_ForgetGateBias = &forgetGateBiasTensor;
612  data.m_CellBias = &cellBiasTensor;
613  data.m_OutputGateBias = &outputGateBiasTensor;
614  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
615  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
616  data.m_ProjectionWeights = &projectionWeightsTensor;
617  data.m_ProjectionBias = &projectionBiasTensor;
618 
    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;    // tanh, as in the other tests in this file
    data.m_Parameters.m_CifgEnabled = false;
622  data.m_Parameters.m_PeepholeEnabled = true;
623  data.m_Parameters.m_ProjectionEnabled = true;
624  data.m_Parameters.m_LayerNormEnabled = false;
625  data.m_Parameters.m_TimeMajor = false;
626  data.m_Parameters.m_ClippingThresCell = 10.0f;
627 
628 
629  std::unique_ptr<armnn::IWorkload> workload
630  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
631  inputHandle->Allocate();
632  outputStateInHandle->Allocate();
633  cellStateInHandle->Allocate();
634  outputHandle->Allocate();
635 
636  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
637  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
638  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
639 
640  workload->Execute();
641 
642  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
643 
644  return LayerTestResult<float, 3>(actualOutput,
645  expectedOutput,
646  outputHandle->GetShape(),
647  outputTensorInfo.GetShape());
648 }
649 
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
654 {
655  IgnoreUnused(memoryManager);
656  unsigned int batchSize = 3;
657  unsigned int timeSize = 2;
658  unsigned int outputSize = 4;
659  unsigned int inputSize = 3;
660  unsigned numUnits = 5;
661 
662  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
663  armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
664  armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
665  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
666 
667  const std::vector<float> inputVector = { 1., 2., 3., 4., 5., 4.,
668  3., 2., 1., 2., 3., 4.,
669  5., 4., 3., 2., 1., 2. };
670 
671  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
672  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
673 
674  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
675 
676  const std::vector<float> expectedOutput = { 0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
677  0.11458f, 0.0407109f, 0.300327f, 0.174301f,
678  0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
679  0.108008f, 0.0386623f, 0.273471f, 0.167115f,
680  0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
681  0.106649f, 0.0276847f, 0.229863f, 0.166958f };
682 
683  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
684  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
685  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
686  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
687  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
688 
689  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
690 
    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;
693 
694  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
695  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
696  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
697 
698  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
699 
700  armnn::TensorInfo tensorInfo4({outputSize}, armnn::DataType::Float32);
701  armnn::TensorInfo tensorInfo5({numUnits}, armnn::DataType::Float32);
702  armnn::TensorInfo tensorInfo5x3({numUnits, inputSize}, armnn::DataType::Float32);
703  armnn::TensorInfo tensorInfo5x4({numUnits, outputSize}, armnn::DataType::Float32);
704  armnn::TensorInfo tensorInfo4x5({outputSize, numUnits}, armnn::DataType::Float32);
705 
706  std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
707  -0.117484632f, 0.3298470976f, -0.1179017122f,
708  0.214305695f, 0.42135173085f, 0.003878414626f,
709  -0.348303917f, -0.1881275477f, 0.0343011027f,
710  -0.38837709614f, -0.05636804124f, 0.4259087456f};
711 
712  std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
713  -0.3810434485f, 0.268383264f, -0.009807467424f,
714  -0.3522925403f, -0.24275735512f, -0.28344226125f,
715  0.13512269116f, -0.4932442977f, -0.10039821991f,
716  0.2726137042f, 0.09216640889f, -0.06551410215f};
717 
718  std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
719  0.386399507f, -0.259465157985f, -0.16545993089f,
720  -0.4230232555f, 0.341664791103f, -0.18127849691f,
721  -0.2277662414f, -0.55275535589f, 0.34184026718f,
722  0.3954237699f, -0.19407111404f, 0.30412107706f};
723 
724  std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
725  0.53969591851f, 0.23393625035f, -0.27140527306f,
726  0.50009280443f, 0.07511717046f, 0.3998299249f,
727  -0.51717478049f, 0.1889653282f, -0.367323637f,
728  -0.12584099173f, -0.12319286912f, 0.2407919466f};
729 
730  std::vector<float> inputGateBias{ 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
731  std::vector<float> forgetGateBias{ 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
732  std::vector<float> cellBias{ -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
733  std::vector<float> outputGateBias{ 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
734 
735  std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
736  -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
737  0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
738  0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
739  0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f };
740 
741  std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
742  -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
743  -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
744  -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
745  0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f };
746 
747  std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
748  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
749  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
750  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
751  0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f };
752 
753  std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
754  -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
755  0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
756  -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f,
757  0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f };
758 
759  std::vector<float> cellToInputWeights { 0.05f, 0.1f, 0.25f, 0.15f, -0.02f };
760  std::vector<float> cellToForgetWeights { -0.02f, -0.15f, -0.25f, -0.03f, 0.15f };
761  std::vector<float> cellToOutputWeights { 0.1f, -0.1f, -0.5f, 0.05f, 0.01f };
762 
763  std::vector<float> projectionWeights{ -0.1f, 0.2f, 0.01f, -0.2f,
764  0.1f, 0.5f, 0.3f, 0.08f,
765  0.07f, 0.2f, -0.4f, 0.2f,
766  0.5f, -0.4f, 0.3f, -0.2f,
767  0.3f, 0.08f, -0.07f, 0.2f};
768 
769  std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
770 
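    // Per-gate layer-normalisation scale vectors ({numUnits} each), applied when
    // m_LayerNormEnabled is set below.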
771  std::vector<float> inputLayerNormWeights{ 0.1f, 0.2f, 0.3f, 0.5f, 0.8f };
772  std::vector<float> forgetLayerNormWeights{ 0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
773  std::vector<float> cellLayerNormWeights{ 0.7f, 0.2f, 0.3f, 0.8f, 0.5f };
774  std::vector<float> outputLayerNormWeights{ 0.6f, 0.2f, 0.2f, 0.5f, 0.1f };
775 
776  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo5x3);
777  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo5x3);
778  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo5x3);
779  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo5x3);
780  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo5x4);
781  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo5x4);
782  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo5x4);
783  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo5x4);
784  armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo5);
785  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo5);
786  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo5);
787  armnn::ScopedTensorHandle cellBiasTensor(tensorInfo5);
788  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo5);
789  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo5);
790  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo5);
791  armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo4x5);
792  armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo4);
793 
794  armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo5);
795  armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo5);
796  armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo5);
797  armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo5);
798 
799  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
800  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
801  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
802  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
803  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
804  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
805  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
806  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
807  AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
808  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
809  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
810  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
811  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
812  AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
813  AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
814  AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
815  AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
816 
817  AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
818  AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
819  AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
820  AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
821 
822  data.m_InputToInputWeights = &inputToInputWeightsTensor;
823  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
824  data.m_InputToCellWeights = &inputToCellWeightsTensor;
825  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
826  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
827  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
828  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
829  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
830  data.m_CellToInputWeights = &cellToInputWeightsTensor;
831  data.m_InputGateBias = &inputGateBiasTensor;
832  data.m_ForgetGateBias = &forgetGateBiasTensor;
833  data.m_CellBias = &cellBiasTensor;
834  data.m_OutputGateBias = &outputGateBiasTensor;
835  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
836  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
837  data.m_ProjectionWeights = &projectionWeightsTensor;
838  data.m_ProjectionBias = &projectionBiasTensor;
839 
840  data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
841  data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
842  data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
843  data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
844 
    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;    // tanh, as in the other tests in this file
    data.m_Parameters.m_CifgEnabled = false;
848  data.m_Parameters.m_PeepholeEnabled = true;
849  data.m_Parameters.m_ProjectionEnabled = true;
850  data.m_Parameters.m_LayerNormEnabled = true;
851  data.m_Parameters.m_TimeMajor = false;
852  data.m_Parameters.m_ClippingThresCell = 10.0f;
853 
854  std::unique_ptr<armnn::IWorkload> workload
855  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
856  inputHandle->Allocate();
857  outputStateInHandle->Allocate();
858  cellStateInHandle->Allocate();
859  outputHandle->Allocate();
860 
861  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
862  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
863  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
864 
865  workload->Execute();
866 
867  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
868 
869  return LayerTestResult<float, 3>(actualOutput,
870  expectedOutput,
871  outputHandle->GetShape(),
872  outputTensorInfo.GetShape());
873 }
874 
LayerTestResult<float, 3> UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
879 {
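    // CIFG (coupled input-forget gate) test: the input gate is derived from the forget gate, so no
    // input-to-input or recurrent-to-input weights and no input gate bias are supplied.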
880  IgnoreUnused(memoryManager);
881  unsigned int batchSize = 3;
882  unsigned int timeSize = 2;
883  unsigned int inputSize = 3;
884  unsigned int outputSize = 4;
885  unsigned numUnits = outputSize;
886 
887  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
888  armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
889  armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
890 
891  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
892 
893  std::vector<float> inputVector = { 1., 2., 3., 4., 5., 4.,
894  3., 2., 1., 2., 3., 4.,
895  5., 4., 3., 2., 1., 2. };
896 
897  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
898  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
899 
900  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
901 
902  std::vector<float> outputVector = { -0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
903  -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
904  -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
905  -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
906  -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
907  -0.031675f, 0.125987f, -0.526695f, -0.110093f };
908 
909  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
910  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
911  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
912  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
913  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
914 
915  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
916 
    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;
919 
920  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
921  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
922  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
923 
924  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
925 
926  armnn::TensorInfo tensorInfo4({numUnits}, armnn::DataType::Float32);
927  armnn::TensorInfo tensorInfo12({numUnits, 3}, armnn::DataType::Float32);
928  armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::DataType::Float32);
929 
930  std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
931  -0.3810434485f, 0.268383264f, -0.009807467424f,
932  -0.3522925403f, -0.24275735512f, -0.28344226125f,
933  0.13512269116f, -0.4932442977f, -0.10039821991f };
934 
935  std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
936  0.386399507f, -0.259465157985f, -0.16545993089f,
937  -0.4230232555f, 0.341664791103f, -0.18127849691f,
938  -0.2277662414f, -0.55275535589f, 0.34184026718f };
939 
940  std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
941  0.53969591851f, 0.23393625035f, -0.27140527306f,
942  0.50009280443f, 0.07511717046f, 0.3998299249f,
943  -0.51717478049f, 0.1889653282f, -0.367323637f };
944 
945  std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
946  -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
947  -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
948  -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
949 
950  std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
951  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
952  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
953  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
954 
955  std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
956  -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
957  0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
958  -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
959 
960  std::vector<float> cellToForgetWeights{ 0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f };
961 
962  std::vector<float> cellToOutputWeights{ -0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f };
963 
964  std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
965 
966  std::vector<float> cellBias = { 0., 0., 0., 0. };
967 
968  std::vector<float> outputGateBias = { 0., 0., 0., 0. };
969 
970  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo12);
971  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo12);
972  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo12);
973  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
974  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
975  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
976  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
977  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
978  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
979  armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
980  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
981 
982  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
983  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
984  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
985  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
986  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
987  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
988  AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
989  AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
990  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
991  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
992  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
993 
994  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
995  data.m_InputToCellWeights = &inputToCellWeightsTensor;
996  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
997  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
998  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
999  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1000  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1001  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1002  data.m_ForgetGateBias = &forgetGateBiasTensor;
1003  data.m_CellBias = &cellBiasTensor;
1004  data.m_OutputGateBias = &outputGateBiasTensor;
1005 
1006  // Flags to set test configuration
1009  data.m_Parameters.m_ActivationFunc = 4;
1010  data.m_Parameters.m_CifgEnabled = true;
1011  data.m_Parameters.m_PeepholeEnabled = true;
1012  data.m_Parameters.m_ProjectionEnabled = false;
1013  data.m_Parameters.m_TimeMajor = false;
1014 
1015  std::unique_ptr<armnn::IWorkload> workload
1016  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1017  inputHandle->Allocate();
1018  outputStateInHandle->Allocate();
1019  cellStateInHandle->Allocate();
1020 
1021  outputHandle->Allocate();
1022 
1023  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1024  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1025  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1026 
1027  workload->Execute();
1028 
1029  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1030 
1031  return LayerTestResult<float, 3>(actualOutput,
1032  outputVector,
1033  outputHandle->GetShape(),
1034  outputTensorInfo.GetShape());
1035 }
1036 
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
1041 {
1042  IgnoreUnused(memoryManager);
1043  unsigned int batchSize = 3;
1044  unsigned int timeSize = 2;
1045  unsigned int inputSize = 3;
1046  unsigned int outputSize = 4;
1047  unsigned numUnits = outputSize;
1048 
1049  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1050  armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1051  armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1052 
1053  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1054 
1055  const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1056  0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1057  0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1058 
1059  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1060  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1061 
1062  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1063 
1064  const std::vector<float> outputVector = { -0.0142517f, -0.0198845f, -0.0120569f, -0.0116868f,
1065  -0.0350714f, -0.0343202f, -0.047504f, -0.0569789f,
1066  -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
1067  -0.0294759f, -0.0129935f, -0.0444175f, -0.0444354f,
1068  -0.0280855f, 0.00545101f, -0.051422f, -0.0463838f,
1069  -0.0310702f, 0.00915739f, -0.0625207f, -0.0482648f };
1070 
1071  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1072  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1073  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1074  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1075  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1076 
1077  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1078 
1079  armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1080  armnn::WorkloadInfo info;
1081 
1082  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1083  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1084  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1085 
1086  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1087 
1088  armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1089  armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1090  armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1091 
1092  std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1093  std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1094  std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1095  std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1096 
1097  std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1098  std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1099  std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1100  std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1101 
1102  std::vector<float> inputGateBias = { 0., 0., 0., 0. };
1103  std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1104  std::vector<float> cellBias = { 0., 0., 0., 0. };
1105  std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1106 
1107  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1108  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1109  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1110  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1111  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1112  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1113  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1114  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1115  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1116  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1117  armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1118  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1119 
1120  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1121  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1122  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1123  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1124  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1125  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1126  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1127  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1128  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1129  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1130  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1131  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1132 
1133  data.m_InputToInputWeights = &inputToInputWeightsTensor;
1134  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1135  data.m_InputToCellWeights = &inputToCellWeightsTensor;
1136  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1137  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1138  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1139  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1140  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1141  data.m_InputGateBias = &inputGateBiasTensor;
1142  data.m_ForgetGateBias = &forgetGateBiasTensor;
1143  data.m_CellBias = &cellBiasTensor;
1144  data.m_OutputGateBias = &outputGateBiasTensor;
1145 
1146  // Flags to set test configuration
1149  data.m_Parameters.m_ActivationFunc = 4;
1150  data.m_Parameters.m_CifgEnabled = false;
1151  data.m_Parameters.m_PeepholeEnabled = false;
1152  data.m_Parameters.m_ProjectionEnabled = false;
1153  data.m_Parameters.m_TimeMajor = false;
1154 
1155  std::unique_ptr<armnn::IWorkload> workload
1156  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1157  inputHandle->Allocate();
1158  outputStateInHandle->Allocate();
1159  cellStateInHandle->Allocate();
1160 
1161  outputHandle->Allocate();
1162 
1163  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1164  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1165  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1166 
1167  workload->Execute();
1168 
1169  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1170 
1171  return LayerTestResult<float, 3>(actualOutput,
1172  outputVector,
1173  outputHandle->GetShape(),
1174  outputTensorInfo.GetShape());
1175 }
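In this test the gate weights are QAsymmS8 with scale 0.1f and zero offset, while the biases, state tensors and input stay Float32. Below is a hedged sketch of the asymmetric dequantization rule such quantized weights follow; the helper name is illustrative, not an ArmNN API.

// Sketch of the asymmetric dequantization rule the QAsymmS8 weights above follow:
// real = scale * (quantized - offset), with scale 0.1f and offset 0 in this test.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<float> DequantizeQAsymmS8(const std::vector<int8_t>& quantized, float scale, int32_t offset)
{
    std::vector<float> real(quantized.size());
    for (std::size_t i = 0; i < quantized.size(); ++i)
    {
        real[i] = scale * (static_cast<float>(quantized[i]) - static_cast<float>(offset));
    }
    return real;
}

// For example, the first row of inputToInputWeights above, { -4, -1, -1 },
// dequantizes to { -0.4f, -0.1f, -0.1f }.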
1176 
1177 LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8TimeMajorTest(
1178  armnn::IWorkloadFactory& workloadFactory,
1179  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1180  const armnn::ITensorHandleFactory& tensorHandleFactory)
1181 {
1182  IgnoreUnused(memoryManager);
1183  unsigned int batchSize = 3;
1184  unsigned int timeSize = 2;
1185  unsigned int inputSize = 3;
1186  unsigned int outputSize = 4;
1187  unsigned numUnits = outputSize;
1188 
1189  armnn::TensorInfo inputTensorInfo({timeSize, batchSize, inputSize}, armnn::DataType::Float32);
1190  armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1191  armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1192 
1193  armnn::TensorInfo outputTensorInfo({timeSize, batchSize, outputSize}, armnn::DataType::Float32);
1194 
1195  const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1196  0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1197  0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1198 
1199  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1200  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1201 
1202  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1203 
1204  const std::vector<float> outputVector = { -0.0142517f, -0.0198845f, -0.0120122f, -0.0116868f,
1205  -0.0261295f, -0.0188487f, -0.0345463f, -0.049733f,
1206  -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
1207  -0.0291863f, -0.0369402f, -0.0354071f, -0.0296529f,
1208  -0.0419539f, -0.00617731f, -0.0814796f, -0.0804005f,
1209  -0.0244737f, 0.0119905f, -0.0457527f, -0.0331862f };
1210  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1211  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1212  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1213  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1214  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1215 
1216  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1217 
1218  armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1219  armnn::WorkloadInfo info;
1220 
1221  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1222  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1223  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1224 
1225  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1226 
1227  armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1228  armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1229  armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1230 
1231  std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1232  std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1233  std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1234  std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1235 
1236  std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1237  std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1238  std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1239  std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1240 
1241 
1242  std::vector<float> inputGateBias = { 0., 0., 0., 0. };
1243  std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1244  std::vector<float> cellBias = { 0., 0., 0., 0. };
1245  std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1246 
1247  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1248  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1249  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1250  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1251  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1252  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1253  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1254  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1255  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1256  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1257  armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1258  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1259 
1260  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1261  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1262  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1263  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1264  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1265  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1266  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1267  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1268  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1269  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1270  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1271  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1272 
1273  data.m_InputToInputWeights = &inputToInputWeightsTensor;
1274  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1275  data.m_InputToCellWeights = &inputToCellWeightsTensor;
1276  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1277  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1278  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1279  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1280  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1281  data.m_InputGateBias = &inputGateBiasTensor;
1282  data.m_ForgetGateBias = &forgetGateBiasTensor;
1283  data.m_CellBias = &cellBiasTensor;
1284  data.m_OutputGateBias = &outputGateBiasTensor;
1285 
1286  // Flags to set test configuration
1289  data.m_Parameters.m_ActivationFunc = 4;
1290  data.m_Parameters.m_CifgEnabled = false;
1291  data.m_Parameters.m_PeepholeEnabled = false;
1292  data.m_Parameters.m_ProjectionEnabled = false;
1293  data.m_Parameters.m_TimeMajor = true;
1294 
1295  std::unique_ptr<armnn::IWorkload> workload
1296  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1297  inputHandle->Allocate();
1298  outputStateInHandle->Allocate();
1299  cellStateInHandle->Allocate();
1300 
1301  outputHandle->Allocate();
1302 
1303  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1304  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1305  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1306 
1307  workload->Execute();
1308 
1309  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1310 
1311  return LayerTestResult<float, 3>(actualOutput,
1312  outputVector,
1313  outputHandle->GetShape(),
1314  outputTensorInfo.GetShape());
1315 }
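This variant differs from the previous test only in m_TimeMajor = true, which swaps the two leading dimensions of the sequence tensors from {batchSize, timeSize, inputSize} to {timeSize, batchSize, inputSize}. The sketch below spells out the plain row-major index arithmetic behind the two layouts; it is generic pointer math, not an ArmNN API.

// Sketch of the row-major index arithmetic behind the two sequence layouts used in
// these tests: batch-major {batchSize, timeSize, features} versus
// time-major {timeSize, batchSize, features}.
#include <cstddef>

// Flat offset of logical element [b][t][f] in batch-major storage.
inline std::size_t BatchMajorIndex(std::size_t b, std::size_t t, std::size_t f,
                                   std::size_t timeSize, std::size_t features)
{
    return (b * timeSize + t) * features + f;
}

// Flat offset of logical element [t][b][f] in time-major storage.
inline std::size_t TimeMajorIndex(std::size_t t, std::size_t b, std::size_t f,
                                  std::size_t batchSize, std::size_t features)
{
    return (t * batchSize + b) * features + f;
}

// With batchSize = 3, timeSize = 2 and features = 3, logical element [b=2][t=0][f=0]
// maps to flat index 12 in batch-major storage and to flat index 6 in time-major storage.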
1316 
1317 LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeWithProjectionTest(
1318  armnn::IWorkloadFactory& workloadFactory,
1319  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1320  const armnn::ITensorHandleFactory& tensorHandleFactory)
1321 {
1322  IgnoreUnused(memoryManager);
1323  unsigned int batchSize = 3;
1324  unsigned int timeSize = 2;
1325  unsigned int outputSize = 4;
1326  unsigned int inputSize = 3;
1327  unsigned numUnits = 4;
1328 
1329  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1330  armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
1331  armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
1332  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1333 
1334  const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1335  0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1336  0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1337 
1338  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1339  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1340 
1341  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1342 
1343  const std::vector<float> expectedOutput = { 0.612103f, 1.56788f, 0.31966f, 1.42956f,
1344  0.909718f, 3.07916f, -0.560586f, 3.8907f,
1345  0.753671f, 1.77485f, 0.365122f, 1.60077f,
1346  0.812644f, 2.79092f, -0.605396f, 3.61742f,
1347  0.791857f, 1.64353f, 0.316588f, 1.55192f,
1348  0.807265f, 2.47012f, -0.539598f, 3.25654f };
1349 
1350  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1351  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1352  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1353  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1354  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1355  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1356 
1357  armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1358  armnn::WorkloadInfo info;
1359 
1360  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1361  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1362  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1363  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1364 
1365  armnn::TensorInfo tensorInfoOut({outputSize}, armnn::DataType::Float32);
1366  armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1367  armnn::TensorInfo tensorInfoNum({numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1368  armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1369  armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1370  armnn::TensorInfo tensorInfoOutNum({outputSize, numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1371 
1372  std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1373  std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1374  std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1375  std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1376 
1377  std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1378  std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1379  std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1380  std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1381 
1382  std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f};
1383  std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.3032477f, 0.23027696f};
1384  std::vector<float> cellBias = { -0.124379363f, 0.55531194f, 0.23377132f, 0.033463873f };
1385  std::vector<float> outputGateBias = { 0.046159424f, -0.12809046f, 0.03563469f, 0.12648113f };
1386 
1387  std::vector<int8_t> cellToInputWeights = { 5, 10, 25, 15 };
1388  std::vector<int8_t> cellToForgetWeights = { -5, 15, 25, 3 };
1389  std::vector<int8_t> cellToOutputWeights = { 10, -10, -5, 50 };
1390 
1391  std::vector<int8_t> projectionWeights = { -25, 51, 3, -5, 25, 127, 77, 20, 18, 51, -10, 51, -25, 88, 77, -13 };
1392 
1393  std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
1394 
1395  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1396  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1397  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1398  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1399  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1400  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1401  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1402  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1403  armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfoNum);
1404  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1405  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1406  armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1407  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1408  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNum);
1409  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNum);
1410  armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfoOutNum);
1411  armnn::ScopedTensorHandle projectionBiasTensor(tensorInfoOut);
1412 
1413  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1414  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1415  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1416  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1417  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1418  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1419  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1420  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1421  AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
1422  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1423  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1424  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1425  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1426  AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1427  AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1428  AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
1429  AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
1430 
1431  data.m_InputToInputWeights = &inputToInputWeightsTensor;
1432  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1433  data.m_InputToCellWeights = &inputToCellWeightsTensor;
1434  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1435  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1436  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1437  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1438  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1439  data.m_CellToInputWeights = &cellToInputWeightsTensor;
1440  data.m_InputGateBias = &inputGateBiasTensor;
1441  data.m_ForgetGateBias = &forgetGateBiasTensor;
1442  data.m_CellBias = &cellBiasTensor;
1443  data.m_OutputGateBias = &outputGateBiasTensor;
1444  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1445  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1446  data.m_ProjectionWeights = &projectionWeightsTensor;
1447  data.m_ProjectionBias = &projectionBiasTensor;
1448 
1449  // Flags to set test configuration
1450  data.m_Parameters.m_ActivationFunc = 4;
1451  data.m_Parameters.m_CifgEnabled = false;
1452  data.m_Parameters.m_PeepholeEnabled = true;
1453  data.m_Parameters.m_ProjectionEnabled = true;
1454  data.m_Parameters.m_LayerNormEnabled = false;
1455  data.m_Parameters.m_TimeMajor = false;
1456  data.m_Parameters.m_ClippingThresCell = 10.0f;
1457 
1458 
1459  std::unique_ptr<armnn::IWorkload> workload
1460  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1461  inputHandle->Allocate();
1462  outputStateInHandle->Allocate();
1463  cellStateInHandle->Allocate();
1464  outputHandle->Allocate();
1465 
1466  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1467  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1468  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1469 
1470  workload->Execute();
1471 
1472  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1473 
1474  return LayerTestResult<float, 3>(actualOutput,
1475  expectedOutput,
1476  outputHandle->GetShape(),
1477  outputTensorInfo.GetShape());
1478 }
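Here the descriptor additionally enables peephole and projection and sets m_ClippingThresCell = 10.0f. In the standard LSTM-with-projection formulation those options amount to clamping the updated cell state and then projecting the numUnits-wide hidden vector down to outputSize values. A hedged sketch of those two steps under that standard formulation, not ArmNN's kernels:

// Sketch of the cell clipping and projection steps implied by m_ClippingThresCell = 10.0f
// and m_ProjectionEnabled = true.
#include <algorithm>
#include <vector>

// Clamp each updated cell-state value to [-clippingThresCell, clippingThresCell];
// a threshold of 0 conventionally means "no clipping".
void ClipCellState(std::vector<float>& cellState, float clippingThresCell)
{
    if (clippingThresCell > 0.0f)
    {
        for (float& c : cellState)
        {
            c = std::min(std::max(c, -clippingThresCell), clippingThresCell);
        }
    }
}

// Project the numUnits-wide hidden vector down to outputSize values:
// output[o] = projectionBias[o] + sum_u projectionWeights[o * numUnits + u] * hidden[u].
std::vector<float> Project(const std::vector<float>& hidden,            // [numUnits]
                           const std::vector<float>& projectionWeights, // [outputSize x numUnits]
                           const std::vector<float>& projectionBias,    // [outputSize]
                           unsigned int outputSize, unsigned int numUnits)
{
    std::vector<float> output(outputSize);
    for (unsigned int o = 0; o < outputSize; ++o)
    {
        float acc = projectionBias[o];
        for (unsigned int u = 0; u < numUnits; ++u)
        {
            acc += projectionWeights[o * numUnits + u] * hidden[u];
        }
        output[o] = acc;
    }
    return output;
}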
1479 
1480 LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1481  armnn::IWorkloadFactory& workloadFactory,
1482  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1483  const armnn::ITensorHandleFactory& tensorHandleFactory)
1484 {
1485  IgnoreUnused(memoryManager);
1486  unsigned int batchSize = 3;
1487  unsigned int timeSize = 2;
1488  unsigned int outputSize = 4;
1489  unsigned int inputSize = 3;
1490  unsigned numUnits = 5;
1491 
1492  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1493  armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
1494  armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
1495  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1496 
1497  const std::vector<float> inputVector = { 1., 8., 3., 4., 5., 4.,
1498  3., 2., 1., 2., 3., 4.,
1499  5., 4., 3., 2., 1., 2. };
1500 
1501  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1502  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1503 
1504  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1505 
1506  const std::vector<float> expectedOutput = { 0.0471276f, 0.0168155f, 0.0789885f, 0.16550f,
1507  0.0643133f, -0.0400722f, 0.100593f, 0.197722f,
1508  0.0465562f, -0.0600682f, 0.0622087f, 0.115053f,
1509  0.056287f, -0.0566218f, 0.0856832f, 0.148484f,
1510  0.0457859f, -0.0588112f, 0.0623636f, 0.114333f,
1511  0.0509271f, -0.0754262f, 0.058600f, 0.0801288f };
1512 
1513  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1514  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1515  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1516  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1517  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1518 
1519  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1520 
1521  armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1522  armnn::WorkloadInfo info;
1523 
1524  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1525  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1526  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1527 
1528  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1529 
1530  armnn::TensorInfo tensorInfoOut({outputSize}, armnn::DataType::Float32);
1531  armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1532  armnn::TensorInfo tensorInfoNum({numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1533  armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1534  armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1535  armnn::TensorInfo tensorInfoOutNum({outputSize, numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1536 
1537  std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3, 2, 2, -4 };
1538  std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1, -3, -2, -4 };
1539  std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3, 2, 5, -4 };
1540  std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4, -4, -1, -1 };
1541 
1542  std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
1543  std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
1544  std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
1545  std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
1546 
1547  std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
1548  5, -1, 1, 3, -1, -1, -1, 4, 2, 3 };
1549 
1550  std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
1551  5, -1, 1, 3, -2, -1, -1, 2, 2, 1 };
1552 
1553  std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2,
1554  1, 2, 3, -2, 3, -3, -1, -5, 1, 3 };
1555 
1556  std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3,
1557  -4, -1, -1, -1, 2, -1, 5, 1, -3, -4 };
1558 
1559  std::vector<int8_t> cellToInputWeights = { 5, 3, 8, -5, 2 };
1560  std::vector<int8_t> cellToForgetWeights = { -2, -7, 5, -3, 4 };
1561  std::vector<int8_t> cellToOutputWeights = { 9, -10 , -5, 5, 1 };
1562 
1563  std::vector<int8_t> projectionWeights = { -1, 2, 1, -2, 1, 5, 3, 8, 7, 2,
1564  -4, 2, 5, -4, 3, -2, 3, 8, -7, 2 };
1565 
1566  std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
1567 
1568  std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, -0.3f, -0.1f, 0.5f };
1569  std::vector<float> forgetLayerNormWeights = { -0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
1570  std::vector<float> cellLayerNormWeights = { 0.5f, 0.2f, 0.3f, 0.4f, -0.5f };
1571  std::vector<float> outputLayerNormWeights = { 0.6f, -0.2f, -0.2f, 0.5f, 0.1f };
1572 
1573  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1574  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1575  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1576  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1577  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1578  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1579  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1580  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1581  armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfoNum);
1582  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1583  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1584  armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1585  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1586  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNum);
1587  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNum);
1588  armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfoOutNum);
1589  armnn::ScopedTensorHandle projectionBiasTensor(tensorInfoOut);
1590 
1591  armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfoNumFp);
1592  armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfoNumFp);
1593  armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfoNumFp);
1594  armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfoNumFp);
1595 
1596  AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1597  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1598  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1599  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1600  AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1601  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1602  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1603  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1604  AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
1605  AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1606  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1607  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1608  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1609  AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1610  AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1611  AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
1612  AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
1613 
1614  AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
1615  AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
1616  AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
1617  AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
1618 
1619  data.m_InputToInputWeights = &inputToInputWeightsTensor;
1620  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1621  data.m_InputToCellWeights = &inputToCellWeightsTensor;
1622  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1623  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1624  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1625  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1626  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1627  data.m_CellToInputWeights = &cellToInputWeightsTensor;
1628  data.m_InputGateBias = &inputGateBiasTensor;
1629  data.m_ForgetGateBias = &forgetGateBiasTensor;
1630  data.m_CellBias = &cellBiasTensor;
1631  data.m_OutputGateBias = &outputGateBiasTensor;
1632  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1633  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1634  data.m_ProjectionWeights = &projectionWeightsTensor;
1635  data.m_ProjectionBias = &projectionBiasTensor;
1636 
1637  data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
1638  data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
1639  data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
1640  data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
1641 
1642  // Flags to set test configuration
1643  data.m_Parameters.m_ActivationFunc = 4;
1644  data.m_Parameters.m_CifgEnabled = false;
1645  data.m_Parameters.m_PeepholeEnabled = true;
1646  data.m_Parameters.m_ProjectionEnabled = true;
1647  data.m_Parameters.m_LayerNormEnabled = true;
1648  data.m_Parameters.m_TimeMajor = false;
1649  data.m_Parameters.m_ClippingThresCell = 10.0f;
1650 
1651  std::unique_ptr<armnn::IWorkload> workload
1652  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1653  inputHandle->Allocate();
1654  outputStateInHandle->Allocate();
1655  cellStateInHandle->Allocate();
1656  outputHandle->Allocate();
1657 
1658  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1659  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1660  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1661 
1662  workload->Execute();
1663 
1664  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1665 
1666  return LayerTestResult<float, 3>(actualOutput,
1667  expectedOutput,
1668  outputHandle->GetShape(),
1669  outputTensorInfo.GetShape());
1670 }
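With m_LayerNormEnabled = true the per-gate layer-normalization weights declared above come into play: in the usual layer-norm LSTM recipe each gate's pre-activation is normalized across the numUnits dimension, scaled by that gate's layer-norm weights, and only then offset by the gate bias. The sketch below follows that recipe; the epsilon value is an assumption chosen for illustration.

// Sketch of the per-gate layer normalization implied by m_LayerNormEnabled = true,
// following the usual layer-norm LSTM recipe rather than ArmNN's kernels.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> LayerNormGate(const std::vector<float>& preActivation,    // [numUnits]
                                 const std::vector<float>& layerNormWeights, // [numUnits]
                                 const std::vector<float>& gateBias,         // [numUnits]
                                 float epsilon = 1e-8f)
{
    const std::size_t n = preActivation.size();

    float mean = 0.0f;
    for (float v : preActivation) { mean += v; }
    mean /= static_cast<float>(n);

    float variance = 0.0f;
    for (float v : preActivation) { variance += (v - mean) * (v - mean); }
    variance /= static_cast<float>(n);

    const float invStdDev = 1.0f / std::sqrt(variance + epsilon);

    std::vector<float> normalized(n);
    for (std::size_t i = 0; i < n; ++i)
    {
        normalized[i] = (preActivation[i] - mean) * invStdDev * layerNormWeights[i] + gateBias[i];
    }
    return normalized;
}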
1671 
1672 LayerTestResult<float, 3> UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(
1673  armnn::IWorkloadFactory& workloadFactory,
1674  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675  const armnn::ITensorHandleFactory& tensorHandleFactory)
1676 {
1677  IgnoreUnused(memoryManager);
1678  unsigned int batchSize = 3;
1679  unsigned int timeSize = 2;
1680  unsigned int inputSize = 3;
1681  unsigned int outputSize = 4;
1682  unsigned numUnits = outputSize;
1683 
1684  armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1685  armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1686  armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1687 
1688  armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1689 
1690  const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1691  0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1692  0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1693 
1694  std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1695  std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1696 
1697  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1698 
1699  const std::vector<float> outputVector = { -0.0072104f, -0.00991171f, -0.00650478f, -0.00713055f,
1700  -0.0191782f, -0.0161269f, -0.0233683f, -0.054299f,
1701  -0.00783725f, 0.00635271f, -0.0126718f, -0.022613f,
1702  -0.0161351f, -0.00775868f, -0.021054f, -0.0339778f,
1703  -0.0146392f, 0.00330261f, -0.0258733f, -0.0407797f,
1704  -0.0174297f, 0.0050105f, -0.0266275f, -0.0362564f };
1705 
1706  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1707  std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1708  tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1709  std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1710  tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1711 
1712  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1713 
1714  armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1715  armnn::WorkloadInfo info;
1716 
1717  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1718  AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1719  AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1720 
1721  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1722 
1723  armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1724  armnn::TensorInfo tensorInfoNum({numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1725  armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1726  armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1727 
1728  std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1729  std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1730  std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1731 
1732  std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1733  std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1734  std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1735 
1736  std::vector<int8_t> cellToForgetWeights = { 47, -52, -24, 31 };
1737  std::vector<int8_t> cellToOutputWeights = { -17, 82, 85, -77 };
1738 
1739  std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1740  std::vector<float> cellBias = { 0., 0., 0., 0. };
1741  std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1742 
1743  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1744  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1745  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1746  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1747  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1748  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1749  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNum);
1750  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNum);
1751  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1752  armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1753  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1754 
1755  AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1756  AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1757  AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1758  AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1759  AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1760  AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1761  AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1762  AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1763  AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1764  AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1765  AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1766 
1767  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1768  data.m_InputToCellWeights = &inputToCellWeightsTensor;
1769  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1770  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1771  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1772  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1773  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1774  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1775  data.m_ForgetGateBias = &forgetGateBiasTensor;
1776  data.m_CellBias = &cellBiasTensor;
1777  data.m_OutputGateBias = &outputGateBiasTensor;
1778 
1779  // Flags to set test configuration
1782  data.m_Parameters.m_ActivationFunc = 4;
1783  data.m_Parameters.m_CifgEnabled = true;
1784  data.m_Parameters.m_PeepholeEnabled = true;
1785  data.m_Parameters.m_ProjectionEnabled = false;
1786  data.m_Parameters.m_TimeMajor = false;
1787 
1788  std::unique_ptr<armnn::IWorkload> workload
1789  = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1790  inputHandle->Allocate();
1791  outputStateInHandle->Allocate();
1792  cellStateInHandle->Allocate();
1793 
1794  outputHandle->Allocate();
1795 
1796  CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1797  CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1798  CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1799 
1800  workload->Execute();
1801 
1802  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1803 
1804  return LayerTestResult<float, 3>(actualOutput,
1805  outputVector,
1806  outputHandle->GetShape(),
1807  outputTensorInfo.GetShape());
1808 }
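All of these helpers return a LayerTestResult carrying the actual and expected buffers together with their shapes, leaving the comparison to the calling test suite. A minimal element-wise tolerance check over two float buffers, as a caller might perform, is sketched below; the tolerance value is illustrative and ArmNN's own comparison utilities are not reproduced here.

// Minimal element-wise tolerance check a caller of the helpers above might perform
// on the returned actual/expected buffers. The tolerance value is illustrative only.
#include <cmath>
#include <cstddef>
#include <vector>

bool OutputsMatch(const std::vector<float>& actual,
                  const std::vector<float>& expected,
                  float tolerance = 1e-5f)
{
    if (actual.size() != expected.size())
    {
        return false;
    }
    for (std::size_t i = 0; i < actual.size(); ++i)
    {
        if (std::fabs(actual[i] - expected[i]) > tolerance)
        {
            return false;
        }
    }
    return true;
}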
bool m_ProjectionEnabled
Enable/disable the projection layer.
float m_ClippingThresProj
Clipping threshold value for the projection.
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerInt8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 3 > UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_TimeMajor
Enable/disable time major.
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerInt8TimeMajorTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
DataType
Definition: Types.hpp:35
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 3 > UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_PeepholeEnabled
Enable/disable peephole.
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeWithProjectionTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_ActivationFunc
The activation function to use.
float m_ClippingThresCell
Clipping threshold value for the cell state.
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerFloat32TimeMajorTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
LayerTestResult< float, 3 > UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
const ConstTensorHandle * m_RecurrentToOutputWeights
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
const ConstTensorHandle * m_RecurrentToForgetWeights