ArmNN 21.11
WorkloadDataValidation.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "WorkloadTestUtils.hpp"

#include <armnn/Exceptions.hpp>

#include <backendsCommon/Workload.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/workloads/RefWorkloads.hpp>

#include <doctest/doctest.h>

using namespace armnn;

TEST_SUITE("WorkloadInfoValidation")
{
TEST_CASE("BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData")
{
    TensorShape inputShape { 1, 3, 2, 2 };
    TensorShape outputShape { 1, 3, 2, 2 };

    TensorInfo inputTensorInfo(inputShape, armnn::DataType::QAsymmU8, 0.1f, 125);
    TensorInfo outputTensorInfo(outputShape, armnn::DataType::QAsymmU8, 0.2f, 120);

    BatchNormalizationQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    unsigned int sameShape[] = { 10 };
    TensorInfo sameInfo = armnn::TensorInfo(1, sameShape, armnn::DataType::QAsymmU8);
    ScopedTensorHandle sameTensor(sameInfo);

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    invalidData.m_Mean = &sameTensor;
    invalidData.m_Variance = &sameTensor;
    invalidData.m_Beta = &sameTensor;
    invalidData.m_Gamma = &sameTensor;

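    // Validation is expected to pass: BatchNormalization does not require the input and
    // output tensors to share the same quantization scale and offset.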
    CHECK_NOTHROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
}

TEST_CASE("QueueDescriptor_Validate_WrongNumOfInputsOutputs")
{
    InputQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;
    // Invalid argument exception is expected, because no inputs and no outputs were defined.
    CHECK_THROWS_AS(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {2, 3, 4}; // <- Invalid - input tensor has to be 4D.
    unsigned int outputShape[] = {2, 3, 4, 5};

    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
    inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::DataType::Float32);

    Pooling2dQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);

    // Invalid argument exception is expected, because the input tensor has to be 4D.
    CHECK_THROWS_AS(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("SoftmaxQueueDescriptor_Validate_WrongInputHeight")
{
    unsigned int inputHeight = 1;
    unsigned int inputWidth = 1;
    unsigned int inputChannels = 4;
    unsigned int inputNum = 2;

    unsigned int outputChannels = inputChannels;
    unsigned int outputHeight = inputHeight + 1; // Makes data invalid - Softmax expects height and width to be 1.
    unsigned int outputWidth = inputWidth;
    unsigned int outputNum = inputNum;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    SoftmaxQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Invalid argument exception is expected, because height != 1.
    CHECK_THROWS_AS(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputWidth = 1;
    unsigned int outputHeight = 1;
    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
    unsigned int weightsShape[] = { 1, 1, inputChannels, outputChannels };
    unsigned int biasShape[] = { 1, outputChannels, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(4, biasShape, armnn::DataType::Float32);

    FullyConnectedQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    ScopedTensorHandle weightTensor(weightsDesc);
    ScopedTensorHandle biasTensor(biasesDesc);

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    invalidData.m_Weight = &weightTensor;
    invalidData.m_Bias = &biasTensor;
    invalidData.m_Parameters.m_BiasEnabled = true;
    invalidData.m_Parameters.m_TransposeWeightMatrix = false;

    // Invalid argument exception is expected, because not all required fields have been provided.
    // In particular inputsData[0], outputsData[0] and weightsData can not be null.
    CHECK_THROWS_AS(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("NormalizationQueueDescriptor_Validate_WrongInputHeight")
{
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;
    constexpr unsigned int inputChannels = 3;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight + 1; // Makes data invalid - normalization requires
                                                           // input and output to have the same dimensions.
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    armnn::NormalizationAlgorithmMethod normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
    armnn::NormalizationAlgorithmChannel normChannel = armnn::NormalizationAlgorithmChannel::Across;
    float alpha = 1.f;
    float beta = 1.f;
    float kappa = 1.f;
    uint32_t normSize = 5;

    NormalizationQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
    invalidData.m_Parameters.m_NormChannelType = normChannel;
    invalidData.m_Parameters.m_NormMethodType = normMethod;
    invalidData.m_Parameters.m_NormSize = normSize;
    invalidData.m_Parameters.m_Alpha = alpha;
    invalidData.m_Parameters.m_Beta = beta;
    invalidData.m_Parameters.m_K = kappa;

    // Invalid argument exception is expected, because input height != output height.
    CHECK_THROWS_AS(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("SplitterQueueDescriptor_Validate_WrongWindow")
{
    constexpr unsigned int inputNum = 1;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;
    constexpr unsigned int inputChannels = 3;

    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = 18;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    SplitterQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Invalid, since it has only 3 dimensions while the input tensor is 4d.
    std::vector<unsigned int> wOrigin = {0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
    invalidData.m_ViewOrigins.push_back(window);

    INFO("Invalid argument exception is expected, because split window dimensionality does not match input.");
    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    // Invalid, since the window extends past the boundary of the input tensor.
    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
    invalidData.m_ViewOrigins[0] = window3;
    INFO("Invalid argument exception is expected (wOrigin3[2] + outputHeight > inputHeight).");
    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
    invalidData.m_ViewOrigins[0] = window4;

    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
    armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin5);
    invalidData.m_ViewOrigins.push_back(window5);

    INFO("Invalid exception due to number of split windows not matching number of outputs.");
    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("ConcatQueueDescriptor_Validate_WrongWindow")
{
    constexpr unsigned int inputNum = 1;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    constexpr unsigned int outputNum = 1;
    constexpr unsigned int outputChannels = 3;
    constexpr unsigned int outputHeight = 32;
    constexpr unsigned int outputWidth = 24;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    ConcatQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Invalid, since it has only 3 dimensions while the input tensor is 4d.
    std::vector<unsigned int> wOrigin = {0, 0, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
    invalidData.m_ViewOrigins.push_back(window);

    INFO("Invalid argument exception is expected, because merge window dimensionality does not match input.");
    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    // Invalid, since the window extends past the boundary of the output tensor.
    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
    invalidData.m_ViewOrigins[0] = window3;
    INFO("Invalid argument exception is expected (wOrigin3[2] + inputHeight > outputHeight).");
    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);

    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
    armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4);
    invalidData.m_ViewOrigins[0] = window4;

    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
    armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin5);
    invalidData.m_ViewOrigins.push_back(window5);

    INFO("Invalid exception due to number of merge windows not matching number of inputs.");
    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("AdditionQueueDescriptor_Validate_InputNumbers")
{
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo input2TensorInfo;
    armnn::TensorInfo input3TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {1, 1, 1, 1};

    input1TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    input2TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    input3TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    AdditionQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // Too few inputs.
    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);

    AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);

    // Correct.
    CHECK_NOTHROW(RefAdditionWorkload<>(invalidData, invalidInfo));

    AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);

    // Too many inputs.
    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("AdditionQueueDescriptor_Validate_InputShapes")
{
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo input2TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape1[] = {1, 1, 2, 1};
    unsigned int shape2[] = {1, 1, 3, 2};

    // Incompatible shapes even with broadcasting.
    {
        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        input2TensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);

        AdditionQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }

    // Output size not compatible with input sizes.
    {
        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        input2TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);

        AdditionQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

        // Output differs.
        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }
}

TEST_CASE("MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch")
{
    armnn::TensorInfo input0TensorInfo;
    armnn::TensorInfo input1TensorInfo;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int input0Shape[] = { 2, 2, 4, 4 };
    constexpr std::size_t dimensionCount = std::extent<decltype(input0Shape)>::value;

    // Checks dimension consistency for input tensors.
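    // Each iteration bumps one dimension of input1 so that it differs from input0 on exactly that axis.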
    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
    {
        unsigned int input1Shape[dimensionCount];
        for (unsigned int i = 0; i < dimensionCount; ++i)
        {
            input1Shape[i] = input0Shape[i];
        }

        ++input1Shape[dimIndex];

        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        input1TensorInfo = armnn::TensorInfo(dimensionCount, input1Shape, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);

        MultiplicationQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);

        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }

    // Checks dimension consistency for input and output tensors.
    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
    {
        unsigned int outputShape[dimensionCount];
        for (unsigned int i = 0; i < dimensionCount; ++i)
        {
            outputShape[i] = input0Shape[i];
        }

        ++outputShape[dimIndex];

        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        input1TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
        outputTensorInfo = armnn::TensorInfo(dimensionCount, outputShape, armnn::DataType::Float32);

        MultiplicationQueueDescriptor invalidData;
        WorkloadInfo invalidInfo;

        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);

        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
    }
}

TEST_CASE("ReshapeQueueDescriptor_Validate_MismatchingNumElements")
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // The input and output shapes should have the same number of elements, but these don't.
    unsigned int inputShape[] = { 1, 1, 2, 3 };
    unsigned int outputShape[] = { 1, 1, 1, 2 };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    ReshapeQueueDescriptor invalidData;
    WorkloadInfo invalidInfo;

    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);

    // InvalidArgumentException is expected, because the number of elements doesn't match.
    CHECK_THROWS_AS(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}

TEST_CASE("LstmQueueDescriptor_Validate")
{
    armnn::DataType dataType = armnn::DataType::Float32;

    float qScale = 0.0f;
    int32_t qOffset = 0;

    unsigned int batchSize = 2;
    unsigned int outputSize = 3;
    unsigned int inputSize = 5;
    unsigned int numUnits = 4;

    armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);

    // Scratch buffer size without CIFG [batchSize, numUnits * 4].
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);

    armnn::TensorInfo tensorInfo3({outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4({numUnits}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, dataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, dataType, qScale, qOffset);

    LstmQueueDescriptor data;
    WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, nullptr);
    AddInputToWorkload(data, info, outputStateInTensorInfo, nullptr);
    AddInputToWorkload(data, info, cellStateInTensorInfo, nullptr);

    AddOutputToWorkload(data, info, scratchBufferTensorInfo, nullptr);
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, nullptr);
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, nullptr);
    // AddOutputToWorkload(data, info, outputTensorInfo, nullptr); is deliberately left out.

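    // Constant tensor handles for every weight and bias the descriptor can reference; all of them
    // are attached below so that the individual checks can invalidate one field at a time.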
    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
    armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
    armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_CellToInputWeights = &cellToInputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;
    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
    data.m_ProjectionWeights = &projectionWeightsTensor;
    data.m_ProjectionBias = &projectionBiasTensor;

    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    // Flags to set test configuration.
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = true;
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_Parameters.m_LayerNormEnabled = true;

    // Check wrong number of outputs.
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    AddOutputToWorkload(data, info, outputTensorInfo, nullptr);

    // Check wrong CIFG parameter configuration.
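    // (With CIFG enabled the input gate tensors - m_InputToInputWeights, m_RecurrentToInputWeights
    // and m_InputGateBias - must not be set, so Validate() is expected to throw here.)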
    data.m_Parameters.m_CifgEnabled = true;
    armnn::TensorInfo scratchBufferTensorInfo2({batchSize, numUnits * 3}, dataType, qScale, qOffset);
    SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo2, nullptr);
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_CifgEnabled = false;
    SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo, nullptr);

    // Check wrong inputGateBias configuration.
    data.m_InputGateBias = nullptr;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_InputGateBias = &inputGateBiasTensor;

    // Check inconsistent projection parameters.
    data.m_Parameters.m_ProjectionEnabled = false;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_ProjectionWeights = nullptr;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_ProjectionWeights = &projectionWeightsTensor;

    // Check missing input layer normalisation weights.
    data.m_InputLayerNormWeights = nullptr;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;

    // Layer norm disabled but normalisation weights are present.
    data.m_Parameters.m_LayerNormEnabled = false;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_LayerNormEnabled = true;

    // Check invalid outputTensor shape.
    armnn::TensorInfo incorrectOutputTensorInfo({batchSize, outputSize + 1}, dataType, qScale, qOffset);
    SetWorkloadOutput(data, info, 3, incorrectOutputTensorInfo, nullptr);
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    SetWorkloadOutput(data, info, 3, outputTensorInfo, nullptr);

    // Check invalid cell clipping parameter.
    data.m_Parameters.m_ClippingThresCell = -1.0f;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ClippingThresCell = 0.0f;

    // Check invalid projection clipping parameter.
    data.m_Parameters.m_ClippingThresProj = -1.0f;
    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
    data.m_Parameters.m_ClippingThresProj = 0.0f;

    // Check the correct configuration.
    CHECK_NOTHROW(data.Validate(info));
}

TEST_CASE("BiasPerAxisQuantization_Validate")
{
    constexpr unsigned int nInput = 1u;
    constexpr unsigned int cInput = 3u;
    constexpr unsigned int hInput = 3u;
    constexpr unsigned int wInput = 3u;

    constexpr unsigned int nOutput = nInput;
    constexpr unsigned int cOutput = cInput;
    constexpr unsigned int hOutput = 1u;
    constexpr unsigned int wOutput = 1u;

    const TensorShape inputShape { nInput, cInput, hInput, wInput };
    const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
    const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
    const TensorShape biasShape { cOutput };

    constexpr DataType inputType = DataType::QAsymmU8;
    constexpr DataType weightType = DataType::QSymmS8;
    constexpr DataType biasType = DataType::Signed32;

    constexpr float perTensorScale = 1.5f;
    const TensorInfo inputInfo (inputShape, inputType, perTensorScale);
    const TensorInfo outputInfo(outputShape, inputType, perTensorScale);

    const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
    const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters.m_BiasEnabled = true;

    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);

    ScopedTensorHandle weightTensor(weightInfo);
    queueDescriptor.m_Weight = &weightTensor;

    // Test 1: correct per-axis quantization values.
    const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
    const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);

    ScopedTensorHandle biasHandle1(biasInfo1);
    queueDescriptor.m_Bias = &biasHandle1;

    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));

    // Test 2: wrong per-axis quantization values.
    const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
    const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);

    ScopedTensorHandle biasHandle2(biasInfo2);
    queueDescriptor.m_Bias = &biasHandle2;

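    // Scale values that do not match inputScale * weightScale are still accepted by Validate();
    // only a mismatched number of scales (Test 3 below) causes an exception.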
    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));

    // Test 3: mismatched number of quantization scales.
    const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
    const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);

    ScopedTensorHandle biasHandle3(biasInfo3);
    queueDescriptor.m_Bias = &biasHandle3;

    CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
}

}