WorkloadDataValidation.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "WorkloadTestUtils.hpp"

#include <armnn/Exceptions.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/Workload.hpp>

#include <reference/workloads/RefWorkloads.hpp>
#include <reference/RefWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>

using namespace armnn;

BOOST_AUTO_TEST_SUITE(WorkloadInfoValidation)
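// Each test below builds a deliberately invalid QueueDescriptor / WorkloadInfo pair and checks that
// creating the corresponding reference workload, or calling Validate() directly, throws
// armnn::InvalidArgumentException.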

BOOST_AUTO_TEST_CASE(QueueDescriptor_Validate_WrongNumOfInputsOutputs)
{
    InputQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;
    // Invalid argument exception is expected, because no inputs and no outputs were defined.
    BOOST_CHECK_THROW(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(RefPooling2dFloat32Workload_Validate_WrongDimTensor)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {2, 3, 4}; // <- Invalid - input tensor has to be 4D.
    unsigned int outputShape[] = {2, 3, 4, 5};

    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
    inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::DataType::Float32);

    Pooling2dQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);

    // Invalid argument exception is expected, input tensor has to be 4D.
    BOOST_CHECK_THROW(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(SoftmaxQueueDescriptor_Validate_WrongInputHeight)
{
    unsigned int inputHeight = 1;
    unsigned int inputWidth = 1;
    unsigned int inputChannels = 4;
    unsigned int inputNum = 2;

    unsigned int outputChannels = inputChannels;
    unsigned int outputHeight = inputHeight + 1; // Makes data invalid - Softmax expects height and width to be 1.
    unsigned int outputWidth = inputWidth;
    unsigned int outputNum = inputNum;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    SoftmaxQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Invalid argument exception is expected, because height != 1.
    BOOST_CHECK_THROW(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputWidth = 1;
    unsigned int outputHeight = 1;
    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
    unsigned int weightsShape[] = { 1, 1, inputChannels, outputChannels };
    unsigned int biasShape[] = { 1, outputChannels, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(4, biasShape, armnn::DataType::Float32);

    FullyConnectedQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    ScopedCpuTensorHandle weightTensor(weightsDesc);
    ScopedCpuTensorHandle biasTensor(biasesDesc);

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    invalidData.m_Weight = &weightTensor;
    invalidData.m_Bias = &biasTensor;
    invalidData.m_Parameters.m_BiasEnabled = true;
    invalidData.m_Parameters.m_TransposeWeightMatrix = false;

    // Invalid argument exception is expected, because not all required fields have been provided.
    // In particular inputsData[0], outputsData[0] and weightsData cannot be null.
    BOOST_CHECK_THROW(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(NormalizationQueueDescriptor_Validate_WrongInputHeight)
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;
    constexpr unsigned int inputChannels = 3;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    // Makes data invalid - normalization requires input and output to have the same dimensions.
    constexpr unsigned int outputHeight = inputHeight + 1;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    armnn::NormalizationAlgorithmMethod normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    armnn::NormalizationAlgorithmChannel normChannel = armnn::NormalizationAlgorithmChannel::Across;
    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 5;

    NormalizationQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    invalidData.m_Parameters.m_NormChannelType = normChannel;
    invalidData.m_Parameters.m_NormMethodType = normMethod;
    invalidData.m_Parameters.m_NormSize = normSize;
    invalidData.m_Parameters.m_Alpha = alpha;
    invalidData.m_Parameters.m_Beta = beta;
    invalidData.m_Parameters.m_K = kappa;

    // Invalid argument exception is expected, because input height != output height.
    BOOST_CHECK_THROW(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow)
{
    constexpr unsigned int inputNum = 1;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;
    constexpr unsigned int inputChannels = 3;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = 18;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    SplitterQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

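    // Each ViewOrigin below gives the start coordinates of one output view inside the input tensor;
    // the checks exercise window dimensionality, window bounds, and the number of windows versus outputs.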
    // Invalid, since it has only 3 dimensions while the input tensor is 4d.
    std::vector<unsigned int> wOrigin = {0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
    invalidData.m_ViewOrigins.push_back(window);

    BOOST_TEST_INFO("Invalid argument exception is expected, because split window dimensionality does not "
                    "match input.");
    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    // Invalid, since window extends past the boundary of input tensor.
    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
    invalidData.m_ViewOrigins[0] = window3;
    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2] + outputHeight > inputHeight).");
    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
    invalidData.m_ViewOrigins[0] = window4;

    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
    armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin5);
    invalidData.m_ViewOrigins.push_back(window5);

    BOOST_TEST_INFO("Invalid exception due to number of split windows not matching number of outputs.");
    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(ConcatQueueDescriptor_Validate_WrongWindow)
{
    constexpr unsigned int inputNum = 1;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = 1;
    constexpr unsigned int outputChannels = 3;
    constexpr unsigned int outputHeight = 32;
    constexpr unsigned int outputWidth = 24;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    ConcatQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

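    // For concatenation the ViewOrigins describe where each input view is placed inside the output tensor;
    // as in the splitter test, dimensionality, bounds, and window count are checked below.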
    // Invalid, since it has only 3 dimensions while the input tensor is 4d.
    std::vector<unsigned int> wOrigin = {0, 0, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
    invalidData.m_ViewOrigins.push_back(window);

    BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not "
                    "match input.");
    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    // Invalid, since window extends past the boundary of output tensor.
    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
    invalidData.m_ViewOrigins[0] = window3;
    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2] + inputHeight > outputHeight).");
    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4);
    invalidData.m_ViewOrigins[0] = window4;

    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
    armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin5);
    invalidData.m_ViewOrigins.push_back(window5);

    BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs.");
    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputNumbers)
{
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo input2TensorInfo;
    armnn::TensorInfo input3TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {1, 1, 1, 1};

    input1TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    input2TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    input3TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    AdditionQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Too few inputs.
    BOOST_CHECK_THROW(RefAdditionWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);

    // Correct.
    BOOST_CHECK_NO_THROW(RefAdditionWorkload(invalidData, invalidInfo));

    AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);

    // Too many inputs.
    BOOST_CHECK_THROW(RefAdditionWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputShapes)
{
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo input2TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape1[] = {1, 1, 2, 1};
    unsigned int shape2[] = {1, 1, 3, 2};

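    // Note: {1, 1, 2, 1} and {1, 1, 3, 2} cannot be broadcast against each other because dimension 2
    // differs (2 vs 3) and neither is 1, so even broadcast-aware validation must reject this pair.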
    // Incompatible shapes even with broadcasting.
    {
        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        input2TensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);

        AdditionQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

        BOOST_CHECK_THROW(RefAdditionWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }

    // Output size not compatible with input sizes.
    {
        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        input2TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);

        AdditionQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

        // Output differs.
        BOOST_CHECK_THROW(RefAdditionWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }
}

BOOST_AUTO_TEST_CASE(MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch)
{
    armnn::TensorInfo input0TensorInfo;
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int input0Shape[] = { 2, 2, 4, 4 };
    constexpr std::size_t dimensionCount = std::extent<decltype(input0Shape)>::value;

    // Checks dimension consistency for input tensors.
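    // Each iteration copies input0's shape and enlarges one dimension of input1, so exactly one
    // dimension disagrees between the two inputs.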
    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
    {
        unsigned int input1Shape[dimensionCount];
        for (unsigned int i = 0; i < dimensionCount; ++i)
        {
            input1Shape[i] = input0Shape[i];
        }

        ++input1Shape[dimIndex];

        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        input1TensorInfo = armnn::TensorInfo(dimensionCount, input1Shape, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);

        MultiplicationQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);

        BOOST_CHECK_THROW(RefMultiplicationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }

    // Checks dimension consistency for input and output tensors.
    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
    {
        unsigned int outputShape[dimensionCount];
        for (unsigned int i = 0; i < dimensionCount; ++i)
        {
            outputShape[i] = input0Shape[i];
        }

        ++outputShape[dimIndex];

        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        input1TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(dimensionCount, outputShape, armnn::DataType::Float32);

        MultiplicationQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);

        BOOST_CHECK_THROW(RefMultiplicationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }
}

BOOST_AUTO_TEST_CASE(ReshapeQueueDescriptor_Validate_MismatchingNumElements)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // The input and output shapes should have the same number of elements, but these don't.
    unsigned int inputShape[] = { 1, 1, 2, 3 };
    unsigned int outputShape[] = { 1, 1, 1, 2 };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    ReshapeQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // InvalidArgumentException is expected, because the input and output element counts don't match.
    BOOST_CHECK_THROW(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

BOOST_AUTO_TEST_CASE(LstmQueueDescriptor_Validate)
{
    armnn::DataType dataType = armnn::DataType::Float32;

    float qScale = 0.0f;
    int32_t qOffset = 0;

    unsigned int batchSize = 2;
    unsigned int outputSize = 3;
    unsigned int inputSize = 5;
    unsigned numUnits = 4;

    armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);

    // Scratch buffer size without CIFG: [batchSize, numUnits * 4].
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);

    armnn::TensorInfo tensorInfo3({outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4({numUnits}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, dataType, qScale, qOffset);

    LstmQueueDescriptor data;
    WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, nullptr);
    AddInputToWorkload(data, info, outputStateInTensorInfo, nullptr);
    AddInputToWorkload(data, info, cellStateInTensorInfo, nullptr);

    AddOutputToWorkload(data, info, scratchBufferTensorInfo, nullptr);
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, nullptr);
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, nullptr);
    // AddOutputToWorkload(data, info, outputTensorInfo, nullptr); is deliberately left out.

    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
    armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
    armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_CellToInputWeights = &cellToInputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;
    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
    data.m_ProjectionWeights = &projectionWeightsTensor;
    data.m_ProjectionBias = &projectionBiasTensor;

    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    // Flags to set test configuration.
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = true;
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_Parameters.m_LayerNormEnabled = true;

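    // Each check below invalidates a single field or parameter, verifies that Validate() throws
    // armnn::InvalidArgumentException, and then restores the valid value before the next check.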
    // Check wrong number of outputs.
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    AddOutputToWorkload(data, info, outputTensorInfo, nullptr);

    // Check wrong CIFG parameter configuration.
    data.m_Parameters.m_CifgEnabled = true;
    armnn::TensorInfo scratchBufferTensorInfo2({batchSize, numUnits * 3}, dataType, qScale, qOffset);
    SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo2, nullptr);
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_CifgEnabled = false;
    SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo, nullptr);

    // Check wrong inputGateBias configuration.
    data.m_InputGateBias = nullptr;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_InputGateBias = &inputGateBiasTensor;

    // Check inconsistent projection parameters.
    data.m_Parameters.m_ProjectionEnabled = false;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_ProjectionWeights = nullptr;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_ProjectionWeights = &projectionWeightsTensor;

    // Check missing input layer normalisation weights.
    data.m_InputLayerNormWeights = nullptr;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;

    // Layer norm disabled but normalisation weights are present.
    data.m_Parameters.m_LayerNormEnabled = false;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_LayerNormEnabled = true;

    // Check invalid outputTensor shape.
    armnn::TensorInfo incorrectOutputTensorInfo({batchSize, outputSize + 1}, dataType, qScale, qOffset);
    SetWorkloadOutput(data, info, 3, incorrectOutputTensorInfo, nullptr);
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    SetWorkloadOutput(data, info, 3, outputTensorInfo, nullptr);

    // Check invalid cell clipping parameters.
    data.m_Parameters.m_ClippingThresCell = -1.0f;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ClippingThresCell = 0.0f;

    // Check invalid projection clipping parameters.
    data.m_Parameters.m_ClippingThresProj = -1.0f;
    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ClippingThresProj = 0.0f;

    // Check the correct configuration.
    BOOST_CHECK_NO_THROW(data.Validate(info));
}

BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
{
    constexpr unsigned int nInput = 1u;
    constexpr unsigned int cInput = 3u;
    constexpr unsigned int hInput = 3u;
    constexpr unsigned int wInput = 3u;

    constexpr unsigned int nOutput = nInput;
    constexpr unsigned int cOutput = cInput;
    constexpr unsigned int hOutput = 1u;
    constexpr unsigned int wOutput = 1u;

    const TensorShape inputShape { nInput, cInput, hInput, wInput };
    const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
    const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
    const TensorShape biasShape { cOutput };

    constexpr DataType inputType = DataType::QAsymmU8;
    constexpr DataType weightType = DataType::QSymmS8;
    constexpr DataType biasType = DataType::Signed32;

    constexpr float perTensorScale = 1.5f;
    const TensorInfo inputInfo (inputShape, inputType, perTensorScale);
    const TensorInfo outputInfo(outputShape, inputType, perTensorScale);

    const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
    const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters.m_BiasEnabled = true;

    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);

    ScopedCpuTensorHandle weightTensor(weightInfo);
    queueDescriptor.m_Weight = &weightTensor;

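    // The bias scale for each output channel is expected to equal inputScale * weightScale for that
    // channel, i.e. 1.5 * 2.50 = 3.75 and 1.5 * 3.50 = 5.25; test 2 breaks that relationship and
    // test 3 supplies the wrong number of scales.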
    // Test 1: correct per-axis quantization values.
    const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
    const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);

    ScopedCpuTensorHandle biasHandle1(biasInfo1);
    queueDescriptor.m_Bias = &biasHandle1;

    BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));

    // Test 2: wrong per-axis quantization values.
    const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
    const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);

    ScopedCpuTensorHandle biasHandle2(biasInfo2);
    queueDescriptor.m_Bias = &biasHandle2;

    BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);

    // Test 3: mismatched number of quantization scales.
    const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
    const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);

    ScopedCpuTensorHandle biasHandle3(biasInfo3);
    queueDescriptor.m_Bias = &biasHandle3;

    BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
}

BOOST_AUTO_TEST_SUITE_END()