ArmNN 22.05
WorkloadDataValidation.cpp File Reference

Go to the source code of this file.

Functions

 TEST_SUITE ("WorkloadInfoValidation")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "WorkloadInfoValidation"  )

Definition at line 20 of file WorkloadDataValidation.cpp.

References armnn::Across, CreateWorkload(), armnn::Float32, armnn::info, armnn::Input, armnn::LocalBrightness, NormalizationDescriptor::m_Alpha, BatchNormalizationQueueDescriptor::m_Beta, NormalizationDescriptor::m_Beta, FullyConnectedQueueDescriptor::m_Bias, Convolution2dQueueDescriptor::m_Bias, FullyConnectedDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_BiasEnabled, BatchNormalizationQueueDescriptor::m_Gamma, NormalizationDescriptor::m_K, BatchNormalizationQueueDescriptor::m_Mean, NormalizationDescriptor::m_NormChannelType, NormalizationDescriptor::m_NormMethodType, NormalizationDescriptor::m_NormSize, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, FullyConnectedDescriptor::m_TransposeWeightMatrix, BatchNormalizationQueueDescriptor::m_Variance, SplitterQueueDescriptor::m_ViewOrigins, ConcatQueueDescriptor::m_ViewOrigins, FullyConnectedQueueDescriptor::m_Weight, Convolution2dQueueDescriptor::m_Weight, armnn::QAsymmU8, armnn::QSymmS8, armnn::Signed32, and Convolution2dQueueDescriptor::Validate().
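Every test in the suite below follows the same pattern: build a queue descriptor, register input and output TensorInfos on a WorkloadInfo through the AddInputToWorkload/AddOutputToWorkload test helpers, then check that the descriptor's Validate() call (invoked directly or by the reference workload constructor) throws armnn::InvalidArgumentException when the attached data is inconsistent. The following is a minimal sketch of that pattern, not part of the file itself; the helper definitions mirror what the suite's utilities do, the test case name is made up for illustration, and the include paths are approximate.

// Sketch only: assumes the doctest framework and ArmNN public backend headers.
#include <doctest/doctest.h>

#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/backends/WorkloadData.hpp>

namespace
{
// These mirror the AddInputToWorkload/AddOutputToWorkload helpers used in the listing:
// they append a (possibly null) tensor handle to the descriptor and the matching
// TensorInfo to the WorkloadInfo.
template <typename QueueDescriptor>
void AddInputToWorkload(QueueDescriptor& descriptor, armnn::WorkloadInfo& info,
                        const armnn::TensorInfo& tensorInfo, armnn::ITensorHandle* handle)
{
    descriptor.m_Inputs.push_back(handle);
    info.m_InputTensorInfos.push_back(tensorInfo);
}

template <typename QueueDescriptor>
void AddOutputToWorkload(QueueDescriptor& descriptor, armnn::WorkloadInfo& info,
                         const armnn::TensorInfo& tensorInfo, armnn::ITensorHandle* handle)
{
    descriptor.m_Outputs.push_back(handle);
    info.m_OutputTensorInfos.push_back(tensorInfo);
}
} // anonymous namespace

TEST_CASE("Example_SoftmaxQueueDescriptor_Validate_MismatchedShapes")
{
    // Output shape deliberately differs from the input shape, so validation must fail.
    unsigned int inputShape[]  = { 2, 4, 1, 1 };
    unsigned int outputShape[] = { 2, 4, 2, 1 };

    armnn::TensorInfo inputInfo (4, inputShape,  armnn::DataType::Float32);
    armnn::TensorInfo outputInfo(4, outputShape, armnn::DataType::Float32);

    armnn::SoftmaxQueueDescriptor descriptor;
    armnn::WorkloadInfo info;

    AddInputToWorkload (descriptor, info, inputInfo,  nullptr);
    AddOutputToWorkload(descriptor, info, outputInfo, nullptr);

    // Every queue descriptor exposes Validate(const WorkloadInfo&); it throws
    // armnn::InvalidArgumentException when the registered tensors are inconsistent.
    CHECK_THROWS_AS(descriptor.Validate(info), armnn::InvalidArgumentException);
}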

21 {
22 TEST_CASE("BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData")
23 {
24  TensorShape inputShape { 1, 3, 2, 2 };
25  TensorShape outputShape { 1, 3, 2, 2 };
26 
27  TensorInfo inputTensorInfo(inputShape, armnn::DataType::QAsymmU8, .1f, 125);
28  TensorInfo outputTensorInfo(outputShape, armnn::DataType::QAsymmU8, .2f, 120);
29 
30  BatchNormalizationQueueDescriptor invalidData;
31  WorkloadInfo invalidInfo;
32 
33  unsigned int sameShape[] = { 10 };
34  TensorInfo sameInfo = armnn::TensorInfo(1, sameShape, armnn::DataType::QAsymmU8);
35  ScopedTensorHandle sameTensor(sameInfo);
36 
37  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
38  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
39 
40  invalidData.m_Mean = &sameTensor;
41  invalidData.m_Variance = &sameTensor;
42  invalidData.m_Beta = &sameTensor;
43  invalidData.m_Gamma = &sameTensor;
44 
45  CHECK_NOTHROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
46 }
47 
48 TEST_CASE("QueueDescriptor_Validate_WrongNumOfInputsOutputs")
49 {
50  InputQueueDescriptor invalidData;
51  WorkloadInfo invalidInfo;
52  //Invalid argument exception is expected, because no inputs and no outputs were defined.
53  CHECK_THROWS_AS(RefWorkloadFactory().CreateWorkload(LayerType::Input, invalidData, invalidInfo),
54                  armnn::InvalidArgumentException);
55 }
56 
57 TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
58 {
59  armnn::TensorInfo inputTensorInfo;
60  armnn::TensorInfo outputTensorInfo;
61 
62  unsigned int inputShape[] = {2, 3, 4}; // <- Invalid - input tensor has to be 4D.
63  unsigned int outputShape[] = {2, 3, 4, 5};
64 
65  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
66  inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::DataType::Float32);
67 
68  Pooling2dQueueDescriptor invalidData;
69  WorkloadInfo invalidInfo;
70 
71  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
72  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
73 
74  // Invalid argument exception is expected, input tensor has to be 4D.
75  CHECK_THROWS_AS(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
76 }
77 
78 TEST_CASE("RefPooling3dFloat32Workload_Validate_WrongDimTensor")
79 {
80  armnn::TensorInfo inputTensorInfo;
81  armnn::TensorInfo outputTensorInfo;
82 
83  unsigned int inputShape[] = {2, 3, 4, 5}; // <- Invalid - input tensor has to be 5D.
84  unsigned int outputShape[] = {2, 3, 4, 5, 6};
85 
86  outputTensorInfo = armnn::TensorInfo(5, outputShape, armnn::DataType::Float32);
87  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
88 
89  Pooling3dQueueDescriptor invalidData;
90  WorkloadInfo invalidInfo;
91 
92  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
93  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
94 
95  // Invalid argument exception is expected, input tensor has to be 5D.
96  CHECK_THROWS_AS(RefPooling3dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
97 }
98 
99 TEST_CASE("SoftmaxQueueDescriptor_Validate_WrongInputHeight")
100 {
101  unsigned int inputHeight = 1;
102  unsigned int inputWidth = 1;
103  unsigned int inputChannels = 4;
104  unsigned int inputNum = 2;
105 
106  unsigned int outputChannels = inputChannels;
107  unsigned int outputHeight = inputHeight + 1; //Makes data invalid - Softmax expects height and width to be 1.
108  unsigned int outputWidth = inputWidth;
109  unsigned int outputNum = inputNum;
110 
111  armnn::TensorInfo inputTensorInfo;
112  armnn::TensorInfo outputTensorInfo;
113 
114  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
115  unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
116 
117  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
118  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
119 
120  SoftmaxQueueDescriptor invalidData;
121  WorkloadInfo invalidInfo;
122 
123  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
124  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
125 
126  //Invalid argument exception is expected, because height != 1.
127  CHECK_THROWS_AS(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
128 }
129 
130 TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
131 {
132  unsigned int inputWidth = 1;
133  unsigned int inputHeight = 1;
134  unsigned int inputChannels = 5;
135  unsigned int inputNum = 2;
136 
137  unsigned int outputWidth = 1;
138  unsigned int outputHeight = 1;
139  unsigned int outputChannels = 3;
140  unsigned int outputNum = 2;
141 
142  // Define the tensor descriptors.
143  armnn::TensorInfo inputTensorInfo;
144  armnn::TensorInfo outputTensorInfo;
145  armnn::TensorInfo weightsDesc;
146  armnn::TensorInfo biasesDesc;
147 
148  unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
149  unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
150  unsigned int weightsShape[] = { 1, 1, inputChannels, outputChannels };
151  unsigned int biasShape[] = { 1, outputChannels, outputHeight, outputWidth };
152 
153  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
154  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
155  weightsDesc = armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32);
156  biasesDesc = armnn::TensorInfo(4, biasShape, armnn::DataType::Float32);
157 
158  FullyConnectedQueueDescriptor invalidData;
159  WorkloadInfo invalidInfo;
160 
161  ScopedTensorHandle weightTensor(weightsDesc);
162  ScopedTensorHandle biasTensor(biasesDesc);
163 
164  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
165  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
166  invalidData.m_Weight = &weightTensor;
167  invalidData.m_Bias = &biasTensor;
168  invalidData.m_Parameters.m_BiasEnabled = true;
169  invalidData.m_Parameters.m_TransposeWeightMatrix = false;
170 
171 
172  //Invalid argument exception is expected, because not all required fields have been provided.
173  //In particular inputsData[0], outputsData[0] and weightsData cannot be null.
174  CHECK_THROWS_AS(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
175 }
176 
177 
178 TEST_CASE("NormalizationQueueDescriptor_Validate_WrongInputHeight")
179 {
180  constexpr unsigned int inputNum = 5;
181  constexpr unsigned int inputHeight = 32;
182  constexpr unsigned int inputWidth = 24;
183  constexpr unsigned int inputChannels = 3;
184 
185  constexpr unsigned int outputNum = inputNum;
186  constexpr unsigned int outputChannels = inputChannels;
187  constexpr unsigned int outputHeight = inputHeight + 1; //Makes data invalid - normalization requires
188  //input and output to have the same dimensions.
189  constexpr unsigned int outputWidth = inputWidth;
190 
191 
192  armnn::TensorInfo inputTensorInfo;
193  armnn::TensorInfo outputTensorInfo;
194 
195  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
196  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
197 
198  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
199  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
200 
201 
202  armnn::NormalizationAlgorithmMethod normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
203  armnn::NormalizationAlgorithmChannel normChannel = armnn::NormalizationAlgorithmChannel::Across;
204  float alpha = 1.f;
205  float beta = 1.f;
206  float kappa = 1.f;
207  uint32_t normSize = 5;
208 
209  NormalizationQueueDescriptor invalidData;
210  WorkloadInfo invalidInfo;
211 
212  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
213  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
214  invalidData.m_Parameters.m_NormChannelType = normChannel;
215  invalidData.m_Parameters.m_NormMethodType = normMethod;
216  invalidData.m_Parameters.m_NormSize = normSize;
217  invalidData.m_Parameters.m_Alpha = alpha;
218  invalidData.m_Parameters.m_Beta = beta;
219  invalidData.m_Parameters.m_K = kappa;
220 
221  //Invalid argument exception is expected, because input height != output height.
222  CHECK_THROWS_AS(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
223 }
224 
225 TEST_CASE("SplitterQueueDescriptor_Validate_WrongWindow")
226 {
227  constexpr unsigned int inputNum = 1;
228  constexpr unsigned int inputHeight = 32;
229  constexpr unsigned int inputWidth = 24;
230  constexpr unsigned int inputChannels = 3;
231 
232  constexpr unsigned int outputNum = inputNum;
233  constexpr unsigned int outputChannels = inputChannels;
234  constexpr unsigned int outputHeight = 18;
235  constexpr unsigned int outputWidth = inputWidth;
236 
237 
238  armnn::TensorInfo inputTensorInfo;
239  armnn::TensorInfo outputTensorInfo;
240 
241  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
242  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
243 
244  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
245  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
246 
247  SplitterQueueDescriptor invalidData;
248  WorkloadInfo invalidInfo;
249 
250  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
251  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
252 
253  // Invalid, since it has only 3 dimensions while the input tensor is 4d.
254  std::vector<unsigned int> wOrigin = {0, 0, 0};
255  armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
256  invalidData.m_ViewOrigins.push_back(window);
257 
258  INFO("Invalid argument exception is expected, because split window dimensionality does not match input.");
259  CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
260 
261  // Invalid, since window extends past the boundary of input tensor.
262  std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
263  armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
264  invalidData.m_ViewOrigins[0] = window3;
265  INFO("Invalid argument exception is expected (wOrigin3[2] + outputHeight > inputHeight).");
266  CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
267 
268 
269  std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
270  armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
271  invalidData.m_ViewOrigins[0] = window4;
272 
273  std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
274  armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin5);
275  invalidData.m_ViewOrigins.push_back(window5);
276 
277  INFO("Invalid exception due to number of split windows not matching number of outputs.");
278  CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
279 }
280 
281 
282 TEST_CASE("ConcatQueueDescriptor_Validate_WrongWindow")
283 {
284  constexpr unsigned int inputNum = 1;
285  constexpr unsigned int inputChannels = 3;
286  constexpr unsigned int inputHeight = 32;
287  constexpr unsigned int inputWidth = 24;
288 
289  constexpr unsigned int outputNum = 1;
290  constexpr unsigned int outputChannels = 3;
291  constexpr unsigned int outputHeight = 32;
292  constexpr unsigned int outputWidth = 24;
293 
294 
295  armnn::TensorInfo inputTensorInfo;
296  armnn::TensorInfo outputTensorInfo;
297 
298  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
299  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
300 
301  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
302  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
303 
304  ConcatQueueDescriptor invalidData;
305  WorkloadInfo invalidInfo;
306 
307  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
308  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
309 
310  // Invalid, since it has only 3 dimensions while the input tensor is 4d.
311  std::vector<unsigned int> wOrigin = {0, 0, 0};
312  armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
313  invalidData.m_ViewOrigins.push_back(window);
314 
315  INFO("Invalid argument exception is expected, because merge window dimensionality does not match input.");
316  CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
317 
318  // Invalid, since window extends past the boundary of output tensor.
319  std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
320  armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
321  invalidData.m_ViewOrigins[0] = window3;
322  INFO("Invalid argument exception is expected (wOrigin3[2] + inputHeight > outputHeight).");
323  CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
324 
325 
326  std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
327  armnn::ConcatQueueDescriptor::ViewOrigin window4(wOrigin4);
328  invalidData.m_ViewOrigins[0] = window4;
329 
330  std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
331  armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin5);
332  invalidData.m_ViewOrigins.push_back(window5);
333 
334  INFO("Invalid exception due to number of merge windows not matching number of inputs.");
335  CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
336 }
337 
338 TEST_CASE("AdditionQueueDescriptor_Validate_InputNumbers")
339 {
340  armnn::TensorInfo input1TensorInfo;
341  armnn::TensorInfo input2TensorInfo;
342  armnn::TensorInfo input3TensorInfo;
343  armnn::TensorInfo outputTensorInfo;
344 
345  unsigned int shape[] = {1, 1, 1, 1};
346 
347  input1TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
348  input2TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
349  input3TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
350  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
351 
352  AdditionQueueDescriptor invalidData;
353  WorkloadInfo invalidInfo;
354 
355  AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
356  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
357 
358  // Too few inputs.
359  CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
360 
361  AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
362 
363  // Correct.
364  CHECK_NOTHROW(RefAdditionWorkload<>(invalidData, invalidInfo));
365 
366  AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);
367 
368  // Too many inputs.
369  CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
370 }
371 
372 TEST_CASE("AdditionQueueDescriptor_Validate_InputShapes")
373 {
374  armnn::TensorInfo input1TensorInfo;
375  armnn::TensorInfo input2TensorInfo;
376  armnn::TensorInfo outputTensorInfo;
377 
378  unsigned int shape1[] = {1, 1, 2, 1};
379  unsigned int shape2[] = {1, 1, 3, 2};
380 
381  // Incompatible shapes even with broadcasting.
382  {
383  input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
384  input2TensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
385  outputTensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
386 
387  AdditionQueueDescriptor invalidData;
388  WorkloadInfo invalidInfo;
389 
390  AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
391  AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
392  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
393 
394  CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
395  }
396 
397  // Output size not compatible with input sizes.
398  {
399  input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
400  input2TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
401  outputTensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
402 
403  AdditionQueueDescriptor invalidData;
404  WorkloadInfo invalidInfo;
405 
406  AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
407  AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
408  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
409 
410  // Output differs.
411  CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
412  }
413 }
414 
415 TEST_CASE("MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch")
416 {
417  armnn::TensorInfo input0TensorInfo;
418  armnn::TensorInfo input1TensorInfo;
419  armnn::TensorInfo outputTensorInfo;
420 
421  constexpr unsigned int input0Shape[] = { 2, 2, 4, 4 };
422  constexpr std::size_t dimensionCount = std::extent<decltype(input0Shape)>::value;
423 
424  // Checks dimension consistency for input tensors.
425  for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
426  {
427  unsigned int input1Shape[dimensionCount];
428  for (unsigned int i = 0; i < dimensionCount; ++i)
429  {
430  input1Shape[i] = input0Shape[i];
431  }
432 
433  ++input1Shape[dimIndex];
434 
435  input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
436  input1TensorInfo = armnn::TensorInfo(dimensionCount, input1Shape, armnn::DataType::Float32);
437  outputTensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
438 
439  MultiplicationQueueDescriptor invalidData;
440  WorkloadInfo invalidInfo;
441 
442  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
443  AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
444  AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
445 
446  CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
447  }
448 
449  // Checks dimension consistency for input and output tensors.
450  for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
451  {
452  unsigned int outputShape[dimensionCount];
453  for (unsigned int i = 0; i < dimensionCount; ++i)
454  {
455  outputShape[i] = input0Shape[i];
456  }
457 
458  ++outputShape[dimIndex];
459 
460  input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
461  input1TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
462  outputTensorInfo = armnn::TensorInfo(dimensionCount, outputShape, armnn::DataType::Float32);
463 
464  MultiplicationQueueDescriptor invalidData;
465  WorkloadInfo invalidInfo;
466 
467  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
468  AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
469  AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
470 
471  CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
472  }
473 }
474 
475 TEST_CASE("ReshapeQueueDescriptor_Validate_MismatchingNumElements")
476 {
477  armnn::TensorInfo inputTensorInfo;
478  armnn::TensorInfo outputTensorInfo;
479 
480  // The input and output shapes should have the same number of elements, but these don't.
481  unsigned int inputShape[] = { 1, 1, 2, 3 };
482  unsigned int outputShape[] = { 1, 1, 1, 2 };
483 
484  inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
485  outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
486 
487  ReshapeQueueDescriptor invalidData;
488  WorkloadInfo invalidInfo;
489 
490  AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
491  AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
492 
493  // InvalidArgumentException is expected, because the number of elements don't match.
494  CHECK_THROWS_AS(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
495 }
496 
497 
498 TEST_CASE("LstmQueueDescriptor_Validate")
499 {
500  armnn::DataType dataType = armnn::DataType::Float32;
501 
502  float qScale = 0.0f;
503  int32_t qOffset = 0;
504 
505  unsigned int batchSize = 2;
506  unsigned int outputSize = 3;
507  unsigned int inputSize = 5;
508  unsigned numUnits = 4;
509 
510  armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, dataType, qScale, qOffset );
511  armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, dataType, qScale, qOffset);
512  armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, dataType, qScale, qOffset);
513 
514  // Scratch buffer size with CIFG [batchSize, numUnits * 4]
515  armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
516  armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
517  armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
518  armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
519 
520  armnn::TensorInfo tensorInfo3({outputSize}, dataType, qScale, qOffset);
521  armnn::TensorInfo tensorInfo4({numUnits}, dataType, qScale, qOffset);
522  armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, dataType, qScale, qOffset);
523  armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, dataType, qScale, qOffset);
524  armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, dataType, qScale, qOffset);
525 
526  LstmQueueDescriptor data;
527  WorkloadInfo info;
528 
529  AddInputToWorkload(data, info, inputTensorInfo, nullptr);
530  AddInputToWorkload(data, info, outputStateInTensorInfo, nullptr);
531  AddInputToWorkload(data, info, cellStateInTensorInfo, nullptr);
532 
533  AddOutputToWorkload(data, info, scratchBufferTensorInfo, nullptr);
534  AddOutputToWorkload(data, info, outputStateOutTensorInfo, nullptr);
535  AddOutputToWorkload(data, info, cellStateOutTensorInfo, nullptr);
536  // AddOutputToWorkload(data, info, outputTensorInfo, nullptr); is left out
537 
538  armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
539  armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
540  armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
541  armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
542  armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
543  armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
544  armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
545  armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
546  armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
547  armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
548  armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
549  armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
550  armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
551  armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
552  armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
553  armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo3x4);
554  armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo3);
555  armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
556  armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
557  armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
558  armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
559 
560  data.m_InputToInputWeights = &inputToInputWeightsTensor;
561  data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
562  data.m_InputToCellWeights = &inputToCellWeightsTensor;
563  data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
564  data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
565  data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
566  data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
567  data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
568  data.m_CellToInputWeights = &cellToInputWeightsTensor;
569  data.m_InputGateBias = &inputGateBiasTensor;
570  data.m_ForgetGateBias = &forgetGateBiasTensor;
571  data.m_CellBias = &cellBiasTensor;
572  data.m_OutputGateBias = &outputGateBiasTensor;
573  data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
574  data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
575  data.m_ProjectionWeights = &projectionWeightsTensor;
576  data.m_ProjectionBias = &projectionBiasTensor;
577 
578  data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
579  data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
580  data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
581  data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
582 
583  // Flags to set test configuration
584  data.m_Parameters.m_ActivationFunc = 4;
585  data.m_Parameters.m_CifgEnabled = false;
586  data.m_Parameters.m_PeepholeEnabled = true;
587  data.m_Parameters.m_ProjectionEnabled = true;
588  data.m_Parameters.m_LayerNormEnabled = true;
589 
590  // check wrong number of outputs
591  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
592  AddOutputToWorkload(data, info, outputTensorInfo, nullptr);
593 
594  // check wrong cifg parameter configuration
595  data.m_Parameters.m_CifgEnabled = true;
596  armnn::TensorInfo scratchBufferTensorInfo2({batchSize, numUnits * 3}, dataType, qScale, qOffset);
597  SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo2, nullptr);
598  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
599  data.m_Parameters.m_CifgEnabled = false;
600  SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo, nullptr);
601 
602  // check wrong inputGateBias configuration
603  data.m_InputGateBias = nullptr;
604  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
605  data.m_InputGateBias = &inputGateBiasTensor;
606 
607  // check inconsistent projection parameters
608  data.m_Parameters.m_ProjectionEnabled = false;
609  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
610  data.m_Parameters.m_ProjectionEnabled = true;
611  data.m_ProjectionWeights = nullptr;
612  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
613  data.m_ProjectionWeights = &projectionWeightsTensor;
614 
615  // check missing input layer normalisation weights
616  data.m_InputLayerNormWeights = nullptr;
617  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
618  data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
619 
620  // layer norm disabled but normalisation weights are present
621  data.m_Parameters.m_LayerNormEnabled = false;
622  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
623  data.m_Parameters.m_LayerNormEnabled = true;
624 
625  // check invalid outputTensor shape
626  armnn::TensorInfo incorrectOutputTensorInfo({batchSize, outputSize + 1}, dataType, qScale, qOffset);
627  SetWorkloadOutput(data, info, 3, incorrectOutputTensorInfo, nullptr);
628  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
629  SetWorkloadOutput(data, info, 3, outputTensorInfo, nullptr);
630 
631  // check invalid cell clipping parameters
632  data.m_Parameters.m_ClippingThresCell = -1.0f;
633  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
634  data.m_Parameters.m_ClippingThresCell = 0.0f;
635 
636  // check invalid projection clipping parameters
637  data.m_Parameters.m_ClippingThresProj = -1.0f;
638  CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
639  data.m_Parameters.m_ClippingThresProj = 0.0f;
640 
641  // check correct configuration
642  CHECK_NOTHROW(data.Validate(info));
643 }
644 
645 TEST_CASE("BiasPerAxisQuantization_ValidateCorrectValues")
646 {
647  constexpr unsigned int nInput = 1u;
648  constexpr unsigned int cInput = 3u;
649  constexpr unsigned int hInput = 3u;
650  constexpr unsigned int wInput = 3u;
651 
652  constexpr unsigned int nOutput = nInput;
653  constexpr unsigned int cOutput = cInput;
654  constexpr unsigned int hOutput = 1u;
655  constexpr unsigned int wOutput = 1u;
656 
657  const TensorShape inputShape { nInput, cInput, hInput, wInput };
658  const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
659  const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
660  const TensorShape biasShape { cOutput };
661 
662  constexpr DataType inputType = DataType::QAsymmU8;
663  constexpr DataType weightType = DataType::QSymmS8;
664  constexpr DataType biasType = DataType::Signed32;
665 
666  constexpr float perTensorScale = 1.5f;
667  const TensorInfo inputInfo (inputShape, inputType, perTensorScale);
668  const TensorInfo outputInfo(outputShape, inputType, perTensorScale);
669 
670  const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
671  const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);
672 
673  Convolution2dQueueDescriptor queueDescriptor;
674  queueDescriptor.m_Parameters.m_BiasEnabled = true;
675 
676  WorkloadInfo workloadInfo;
677  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
678  AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
679  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
680 
681  ScopedTensorHandle weightTensor(weightInfo);
682  queueDescriptor.m_Weight = &weightTensor;
683 
684  // Test 1: correct per-axis quantization values
685  const std::vector<float> biasPerAxisScales1 = { 3.75f, 5.25f };
686  const TensorInfo biasInfo1(biasShape, biasType, biasPerAxisScales1, 0);
687 
688  ScopedTensorHandle biasHandle1(biasInfo1);
689  queueDescriptor.m_Bias = &biasHandle1;
690 
691  AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo1, nullptr);
692 
693  CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
694 }
695 
696 TEST_CASE("BiasPerAxisQuantization_ValidateIncorrectValues")
697 {
698  constexpr unsigned int nInput = 1u;
699  constexpr unsigned int cInput = 3u;
700  constexpr unsigned int hInput = 3u;
701  constexpr unsigned int wInput = 3u;
702 
703  constexpr unsigned int nOutput = nInput;
704  constexpr unsigned int cOutput = cInput;
705  constexpr unsigned int hOutput = 1u;
706  constexpr unsigned int wOutput = 1u;
707 
708  const TensorShape inputShape { nInput, cInput, hInput, wInput };
709  const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
710  const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
711  const TensorShape biasShape { cOutput };
712 
713  constexpr DataType inputType = DataType::QAsymmU8;
714  constexpr DataType weightType = DataType::QSymmS8;
715  constexpr DataType biasType = DataType::Signed32;
716 
717  constexpr float perTensorScale = 1.5f;
718  const TensorInfo inputInfo (inputShape, inputType, perTensorScale);
719  const TensorInfo outputInfo(outputShape, inputType, perTensorScale);
720 
721  const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
722  const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);
723 
724  Convolution2dQueueDescriptor queueDescriptor;
725  queueDescriptor.m_Parameters.m_BiasEnabled = true;
726 
727  WorkloadInfo workloadInfo;
728  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
729  AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
730  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
731 
732  ScopedTensorHandle weightTensor(weightInfo);
733  queueDescriptor.m_Weight = &weightTensor;
734 
735  // Test 2: wrong per-axis quantization values
736  const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
737  const TensorInfo biasInfo2(biasShape, biasType, biasPerAxisScales2, 0);
738 
739  ScopedTensorHandle biasHandle2(biasInfo2);
740  queueDescriptor.m_Bias = &biasHandle2;
741 
742  AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo2, nullptr);
743 
744  CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
745 
746 }
747 
748 TEST_CASE("BiasPerAxisQuantization_ValidateInvalidArgumentException")
749 {
750  constexpr unsigned int nInput = 1u;
751  constexpr unsigned int cInput = 3u;
752  constexpr unsigned int hInput = 3u;
753  constexpr unsigned int wInput = 3u;
754 
755  constexpr unsigned int nOutput = nInput;
756  constexpr unsigned int cOutput = cInput;
757  constexpr unsigned int hOutput = 1u;
758  constexpr unsigned int wOutput = 1u;
759 
760  const TensorShape inputShape { nInput, cInput, hInput, wInput };
761  const TensorShape outputShape{ nOutput, cOutput, hOutput, wOutput };
762  const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
763  const TensorShape biasShape { cOutput };
764 
765  constexpr DataType inputType = DataType::QAsymmU8;
766  constexpr DataType weightType = DataType::QSymmS8;
767  constexpr DataType biasType = DataType::Signed32;
768 
769  constexpr float perTensorScale = 1.5f;
770  const TensorInfo inputInfo (inputShape, inputType, perTensorScale);
771  const TensorInfo outputInfo(outputShape, inputType, perTensorScale);
772 
773  const std::vector<float> weightPerAxisScales = { 2.50f, 3.50f };
774  const TensorInfo weightInfo(weightShape, weightType, weightPerAxisScales, 0);
775 
776  Convolution2dQueueDescriptor queueDescriptor;
777  queueDescriptor.m_Parameters.m_BiasEnabled = true;
778 
779  WorkloadInfo workloadInfo;
780  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, nullptr);
781  AddInputToWorkload(queueDescriptor, workloadInfo, weightInfo, nullptr);
782  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, nullptr);
783 
784  ScopedTensorHandle weightTensor(weightInfo);
785  queueDescriptor.m_Weight = &weightTensor;
786 
787  // Test 3: mismatched number of quantization scales
788  const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
789  const TensorInfo biasInfo3(biasShape, biasType, biasPerAxisScales3, 0);
790 
791  ScopedTensorHandle biasHandle3(biasInfo3);
792  queueDescriptor.m_Bias = &biasHandle3;
793 
794  AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo3, nullptr);
795 
796  CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
797 }
798 
799 
800 }