ArmNN 22.05
WorkloadData.cpp — source listing extracted from the generated documentation.
(Navigation link "Go to the documentation of this file" removed; some original lines were lost in extraction and are flagged inline below.)
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
12 #include <armnn/Logging.hpp>
13 
14 #include <algorithm>
15 #include <iomanip>
16 #include <string>
17 #include <sstream>
18 
19 #include <fmt/format.h>
20 
21 using namespace armnnUtils;
22 
23 namespace armnn
24 {
25 
26 //---------------------------------------------------------------
28 {
29  switch (inputDataType)
30  {
31  case DataType::Float16:
32  return DataType::Float16;
33  case DataType::BFloat16:
34  case DataType::Float32:
35  return DataType::Float32;
36  case DataType::QAsymmS8:
37  return DataType::Signed32;
38  case DataType::QAsymmU8:
39  return DataType::Signed32;
40  case DataType::QSymmS8:
41  return DataType::Signed32;
42  case DataType::QSymmS16:
43  return DataType::Signed32;
44  default:
45  ARMNN_ASSERT_MSG(false, "Invalid input data type");
46  return DataType::Float32;
47  }
48 }
49 
50 namespace
51 {
52 
//---------------------------------------------------------------
// android ndk does not support std::to_string function.
/// Portable std::to_string replacement: streams the value through an output
/// string stream and returns the accumulated text.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
62 
63 //---------------------------------------------------------------
64 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
65 {
66  if (!ptr)
67  {
68  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
69  paramName + " parameter must be set.");
70  }
71 }
72 
73 //---------------------------------------------------------------
74 void ValidateTensorShapesMatch(const TensorInfo& first,
75  const TensorInfo& second,
76  std::string const& descName,
77  std::string const& firstName,
78  std::string const& secondName)
79 {
80  if (first.GetShape() != second.GetShape())
81  {
82  throw InvalidArgumentException(descName + ": "
83  + firstName + " & " + secondName + " must have identical shapes");
84  }
85 }
86 
87 //---------------------------------------------------------------
88 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
89 {
90  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
91  {
92  throw InvalidArgumentException(descName +
93  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
94  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
95  }
96 }
97 
98 //---------------------------------------------------------------
99 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
100 {
101  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
102  {
103  throw InvalidArgumentException(descName +
104  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
105  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
106  }
107 }
108 
109 //---------------------------------------------------------------
110 
111 //---------------------------------------------------------------
112 void ValidateTensorNumElements(const TensorInfo& tensor,
113  std::string const& descName,
114  unsigned int numElements,
115  std::string const& tensorName)
116 {
117  if (tensor.GetNumElements() != numElements)
118  {
119  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
120  to_string(tensor.GetNumElements()) + " elements for " +
121  tensorName + " tensor.");
122  }
123 }
124 
125 //---------------------------------------------------------------
126 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
127  const std::string& descName, std::string const& tensorName)
128 {
129  if (tensor.GetDataType() != dataType)
130  {
131  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
132  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
133  }
134 }
135 
136 void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
137 {
138  if (tensor.GetDataType() != DataType::QSymmS8)
139  {
140  throw InvalidArgumentException(descName +
141  ": Expected data type which supports per-axis quantization scheme but got " +
142  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
143  }
144 }
145 
146 //---------------------------------------------------------------
147 void ValidateTensorQuantizationSpace(const TensorInfo& first,
148  const TensorInfo& second,
149  const std::string& descName,
150  std::string const& firstName,
151  std::string const& secondName)
152 {
153  if (!first.IsQuantized() ||
154  !second.IsQuantized())
155  {
156  // Not a quantized type, ignore the validation
157  return;
158  }
159 
160  DataType firstDataType = first.GetDataType();
161  DataType secondDataType = second.GetDataType();
162 
163  if (firstDataType != secondDataType)
164  {
165  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
166  " must be of the same quantized type, " +
167  firstName + " is " + GetDataTypeName(firstDataType) + ", " +
168  secondName + " is " + GetDataTypeName(secondDataType));
169  }
170 
171  if (!first.IsTypeSpaceMatch(second))
172  {
173  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
174  " must have the same quantization space, " +
175  firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
176  " and scale " + to_string(first.GetQuantizationScale()) + ", " +
177  secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
178  " and scale " + to_string(second.GetQuantizationScale()));
179  }
180 }
181 
//---------------------------------------------------------------
/// Checks a bias tensor's quantization parameters against the input and weight
/// tensors: the bias offset must be zero, and each bias scale should equal
/// input_scale * weight_scale. A scale mismatch only logs a warning — the
/// provided scale is kept — so this is advisory rather than fatal.
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    // Helper lambda function to validate a single bias quantization scale value
    auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
    {
        // Absolute tolerance for the float comparison of the two scales.
        constexpr float tolerance = 0.0001f;
        if (std::abs(biasScale - expectedScale) > tolerance)
        {
            // Print the float values with extra precision to see very small differences
            ARMNN_LOG(warning) << std::setprecision(6) << descName << ": Expected " << expectedScale <<
                " for bias quantization scale (product of input and weight scales), but got " <<
                biasScale << ". Using scale provided.";
        }
    };

    // A non-zero bias offset is a hard error, unlike the scale check above.
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }

    if (biasTensor.HasMultipleQuantizationScales() || weightsTensorInfo.HasMultipleQuantizationScales())
    {
        // Validate per-axis quantization scales
        const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
        const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();

        // Per-axis scales must pair up one-to-one between weights and bias.
        if (weightScales.size() != biasScales.size())
        {
            std::stringstream msg;
            msg << descName << ": Expected matching number of per-axis quantization scales for weights and bias, "
                << "but got different values. This is currently unsupported: weights=" << weightScales.size()
                << ", biases=" << biasScales.size();
            throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
        }

        for (size_t i = 0ul; i < biasScales.size(); ++i)
        {
            const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
            VerifyBiasQuantizationScale(biasScales[i], expectedScale);
        }
    }
    else
    {
        // Validate per-tensor quantization scale
        const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
        VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
    }
}
235 
236 //---------------------------------------------------------------
237 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
238  unsigned int numExpected,
239  const std::string& descName,
240  const std::string& varName)
241 {
242  if (vec.empty() && numExpected > 0)
243  {
244  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
245  }
246 
247  for (unsigned int i = 0; i < numExpected; ++i)
248  {
249  if (!vec[i])
250  {
251  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
252  }
253  }
254 }
255 
256 //---------------------------------------------------------------
257 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
258  const TensorInfo& second,
259  const TensorInfo& output,
260  std::string const& descName,
261  std::string const& firstName,
262  std::string const& secondName)
263 {
264  // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
265  // broadcasted.
266  if (first.GetNumDimensions() != second.GetNumDimensions())
267  {
268  throw InvalidArgumentException(descName + ": Tensors "
269  + firstName + " & " + secondName
270  + " must have the same number of dimensions in order to be broadcasted");
271  }
272  uint32_t numDims = first.GetNumDimensions();
273  std::vector<uint32_t> outputDims(numDims, 0u);
274  for (uint32_t i = 0; i < numDims; i++)
275  {
276  const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
277  const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
278  if (dimsNotEqual && dimsNotOne)
279  {
280  throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
281  }
282  outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
283  }
284  TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
285  if (broadcastShape != output.GetShape())
286  {
287  throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
288  + firstName + " & " + secondName
289  + " does not match the output shape");
290  }
291 }
292 
293 //---------------------------------------------------------------
294 void ValidateDataTypes(const TensorInfo& info,
295  const std::vector<armnn::DataType>& supportedTypes,
296  std::string const& descName)
297 {
298  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
299  if (iterator == supportedTypes.end())
300  {
301  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
302  }
303 }
304 
305 //---------------------------------------------------------------
306 void ValidateTensorDataTypesMatch(const TensorInfo& first,
307  const TensorInfo& second,
308  std::string const& descName,
309  std::string const& firstName,
310  std::string const& secondName)
311 {
312  if (first.GetDataType() != second.GetDataType())
313  {
314  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
315  " must have identical data types.");
316  }
317 }
318 
319 //---------------------------------------------------------------
320 void ValidateTensorNumElementsMatch(const TensorInfo& first,
321  const TensorInfo& second,
322  std::string const& descName,
323  std::string const& firstName,
324  std::string const& secondName)
325 {
326  if (first.GetNumElements() != second.GetNumElements())
327  {
328  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
329  " must have the same number of elements.");
330  }
331 }
332 
333 void ValidateWeightDataType(const TensorInfo& inputInfo,
334  const TensorInfo& weightInfo,
335  const std::string& descName)
336 {
337  const DataType inputType = inputInfo.GetDataType();
338  if (IsQuantized8BitType(inputType))
339  {
340  const std::vector<DataType> validTypes =
341  {
342  DataType::QAsymmS8,
343  DataType::QAsymmU8,
344  DataType::QSymmS8
345  };
346 
347  ValidateDataTypes(weightInfo, validTypes, descName);
348  }
349  else
350  {
351  ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
352  }
353 }
354 
355 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
356  const std::string& descName,
357  const std::string& tensorName)
358 {
359  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
360  if (!quantizationDim.has_value())
361  {
362  throw InvalidArgumentException(fmt::format("{0}: Quantization dimension for per-axis quantization "
363  "not set on tensor {1}.", descName, tensorName));
364  }
365 }
366 
367 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
368  const std::string& descName,
369  const std::string& tensorName)
370 {
371  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
372  if (quantizationOffset != 0)
373  {
374  throw InvalidArgumentException(fmt::format(
375  "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
376  descName, tensorName, quantizationOffset));
377  }
378 }
379 
380 void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
381  const TensorInfo& outputInfo,
382  const TensorInfo& weightInfo,
383  const Optional<TensorInfo>& optionalBiasInfo,
384  const std::string& descName)
385 {
386  if (weightInfo.HasPerAxisQuantization())
387  {
388  const DataType inputDataType = inputInfo.GetDataType();
389  const DataType outputDataType = outputInfo.GetDataType();
390 
391  const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;
392 
393  if (!canHavePerAxisQuantization)
394  {
395  throw InvalidArgumentException(fmt::format(
396  "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support "
397  "per-axis quantization.", descName, "weight"));
398  }
399 
400 
401  ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
402  ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
403  ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");
404 
405  if (optionalBiasInfo.has_value())
406  {
407  const TensorInfo& biasInfo = optionalBiasInfo.value();
408  if (!biasInfo.HasPerAxisQuantization())
409  {
410  throw InvalidArgumentException(fmt::format(
411  "{}: Per-axis quantization parameters not set on bias tensor, "
412  "despite being set on weight tensor.", descName));
413  }
414 
415  ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
416  ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
417  ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
418  }
419  }
420 }
421 
422 } // anonymous namespace
423 
424 //---------------------------------------------------------------
426  std::string const& descName,
427  unsigned int numDimensions,
428  std::string const& tensorName) const
429 {
430  // If we're allowing expanded dimensions then numDimensions becomes the minimum number of Dimensions we can allow.
431  // Throw an Exception if the tensors has fewer than numDimensions or if the squeezed dimensions are greater than
432  // numDimensions.
434  {
435  unsigned int squeezedDims = 0;
436 
437  for (unsigned int i = 0; i < tensor.GetNumDimensions(); ++i)
438  {
439  if (tensor.GetShape()[i] != 1)
440  {
441  ++squeezedDims;
442  }
443  }
444  if (tensor.GetNumDimensions() < numDimensions || squeezedDims > numDimensions)
445  {
446  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " or less but got " +
447  to_string(tensor.GetNumDimensions()) + " dimensions for " +
448  tensorName + " tensor.");
449  }
450  }
451  else
452  {
453  if (tensor.GetNumDimensions() != numDimensions)
454  {
455  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
456  to_string(tensor.GetNumDimensions()) + " dimensions for " +
457  tensorName + " tensor.");
458  }
459  }
460 }
461 
462 //---------------------------------------------------------------
464  unsigned int numDimension,
465  unsigned int numElements,
466  std::string const& tensorName) const
467 {
468  const std::string functionName{"ValidateTensorNumDimNumElem"};
469  ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
470  ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
471 }
472 
//---------------------------------------------------------------
/// Checks that this descriptor carries the expected numbers of non-null input
/// and output tensor handles. Inputs are checked (and would throw) before
/// outputs, so the validation order is observable to callers.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
480 
481 //---------------------------------------------------------------
482 void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
483 {
484  const std::string descriptorName{"MapQueueDescriptor"};
485 
486  ValidateNumInputs(workloadInfo, descriptorName, 1);
487  ValidateNumOutputs(workloadInfo, descriptorName, 0);
488 
489  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
490  {
491  if (!m_Inputs[i])
492  {
494  fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
495  }
496  }
497 }
498 
499 //---------------------------------------------------------------
500 void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
501 {
502  const std::string descriptorName{"UnmapQueueDescriptor"};
503 
504  ValidateNumInputs(workloadInfo, descriptorName, 1);
505  ValidateNumOutputs(workloadInfo, descriptorName, 0);
506 
507  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
508  {
509  if (!m_Inputs[i])
510  {
512  fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
513  }
514  }
515 }
516 
517 //---------------------------------------------------------------
518 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
519 {
520  const std::string descriptorName{"MemCopyQueueDescriptor"};
521 
522  ValidateNumInputs(workloadInfo, descriptorName, 1);
523  ValidateNumOutputs(workloadInfo, descriptorName , 1);
524 
525  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
526  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
527 
528  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
529  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
530 
531  if (m_Inputs.size() != m_Outputs.size())
532  {
533  throw InvalidArgumentException(fmt::format(
534  "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
535  descriptorName, m_Inputs.size(), m_Outputs.size()));
536  }
537 
538  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
539  {
540  if (!m_Inputs[i])
541  {
542  throw InvalidArgumentException(fmt::format(
543  "{0}: Invalid NULL input {1}.", descriptorName, i));
544  }
545 
546  if (!m_Outputs[i])
547  {
548  throw InvalidArgumentException(fmt::format("{0}: Invalid NULL output {1}", descriptorName, i));
549  }
550  }
551 }
552 
553 //---------------------------------------------------------------
554 void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
555 {
556  ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
557  ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);
558 
559  if (workloadInfo.m_InputTensorInfos.size() != 1)
560  {
561  throw InvalidArgumentException(fmt::format("Number of input infos ({}) is not 1.",
562  workloadInfo.m_InputTensorInfos.size()));
563 
564  }
565 
566  if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
567  {
568  throw InvalidArgumentException(fmt::format(
569  "Number of input infos ({0}) does not match the number of output infos ({1})",
570  workloadInfo.m_InputTensorInfos.size(), workloadInfo.m_OutputTensorInfos.size()));
571  }
572 
573  for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
574  {
575  if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
576  workloadInfo.m_OutputTensorInfos[i].GetNumElements())
577  {
578  throw InvalidArgumentException(fmt::format(
579  "Number of elements for tensor input and output {} does not match", i ));
580  }
581  }
582 
583  if (m_Inputs.size() != 1)
584  {
585  throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
586  }
587 
588  if (m_Inputs.size() != m_Outputs.size())
589  {
590  throw InvalidArgumentException(fmt::format(
591  "Number of inputs ({0}) does not match the number of outputs ({1})",
592  m_Inputs.size(), m_Outputs.size()));
593  }
594 
595  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
596  {
597  if (!m_Inputs[i])
598  {
599  throw InvalidArgumentException(fmt::format("Invalid null input {}", i));
600  }
601 
602  if (!m_Outputs[i])
603  {
604  throw InvalidArgumentException(fmt::format("Invalid null output {}", i));
605  }
606  }
607 }
608 
609 //---------------------------------------------------------------
610 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
611 {
612  ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
613 
614  if (m_Inputs.size() != 1)
615  {
616  throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
617  }
618 
619  if (m_Outputs.size() != 0)
620  {
621  throw InvalidArgumentException(fmt::format("Number of outputs ({}) is not 0.", m_Outputs.size()));
622  }
623 
624  if (!m_Inputs[0])
625  {
626  throw InvalidArgumentException(fmt::format("Invalid null input 0"));
627  }
628 }
629 
//---------------------------------------------------------------
/// Validates an Activation workload: one input, one output, a supported input
/// data type, and matching input/output data types and shapes.
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries of this initializer (original
        // lines 643-648) were lost in extraction — restore them from the
        // upstream ArmNN 22.05 source before relying on this check.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
655 
/// Validates an ArgMinMax workload: one input, one output with an integer
/// index type, a supported input type, and an output shape equal to the input
/// shape with the reduced axis removed.
void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // ArgMin/ArgMax produce element indices, so the output must be integral.
    if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
        outputTensorInfo.GetDataType() != DataType::Signed64)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
    }

    std::vector<DataType> supportedInputTypes =
    {
        // NOTE(review): the DataType entries of this initializer (original
        // lines 673-681) were lost in extraction — restore them from the
        // upstream ArmNN 22.05 source before relying on this check.
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    auto inputNumDimensions = inputShape.GetNumDimensions();
    // Normalises a possibly-negative axis into [0, inputNumDimensions).
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        // NOTE(review): this only throws when the output is neither 1-D nor
        // has outputShape[0] == 1; confirm `&&` (rather than `||`) is the
        // intended logic.
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        // Dimensions before the reduced axis must be unchanged...
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        // ...and dimensions after it shift down by one (the axis is removed).
        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
721 
/// Validates a Cast workload: one input, one output, a supported input data
/// type, and matching input/output shapes. Data types are intentionally not
/// required to match — changing the type is the point of a cast.
void CastQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"CastQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries of this initializer (original
        // lines 734-742) were lost in extraction — restore them from the
        // upstream ArmNN 22.05 source before relying on this check.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
748 
/// Validates a Softmax workload: one input, one output, a supported input
/// data type, and matching input/output data types and shapes.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries of this initializer (original
        // lines 761-766) were lost in extraction — restore them from the
        // upstream ArmNN 22.05 source before relying on this check.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
773 
/// Validates a Splitter workload: one input, at least one output matching the
/// input's data type, one split window per output, and every window of input
/// dimensionality lying entirely inside the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries of this initializer (original
        // lines 783-790) were lost in extraction — restore them from the
        // upstream ArmNN 22.05 source before relying on this check.
    };

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        // Every output slice keeps the input's data type.
        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the `throw InvalidArgumentException(` line (original
        // line 810) was lost in extraction — it must be restored immediately
        // before this message expression for the block to compile.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_OutputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each window (origin + output extent) must fit inside the input.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
845 
/// Validates a Concat workload: at least one input, exactly one output, a
/// valid concat axis, one merge window per input with output dimensionality
/// and lying inside the output tensor, and input/output data-type agreement.
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    // When concatenating along the innermost dimension the window checks
    // below are skipped entirely.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the `throw InvalidArgumentException(` line (original
        // line 882) was lost in extraction — it must be restored immediately
        // before this message expression for the block to compile.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_InputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries of this initializer (original
        // lines 921-928) were lost in extraction — restore them from the
        // upstream ArmNN 22.05 source before relying on this check.
    };

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        // Every input must share the output's data type.
        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
941 
// Validates a Stack workload: every input must match the shape declared in the
// descriptor, the axis must be in [0, rank] (the output gains one dimension),
// and the output shape must equal the input shape with a new dimension of size
// m_NumInputs inserted at m_Axis. All tensors must share one data type.
942 void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
943 {
944  const std::string descriptorName{"StackQueueDescriptor"};
945 
946  ValidateNumOutputs(workloadInfo, descriptorName, 1);
947 
948  if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
949  {
950  throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
951  }
952 
953  // All inputs must have the same shape, which is defined in parameters
954  const TensorShape& inputShape = m_Parameters.m_InputShape;
955  for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
956  {
957  if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
958  {
959  throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
960  }
961  }
962 
963  if (inputShape.GetNumDimensions() > 4)
964  {
965  throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
966  }
967 
968  // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
969  // since the output tensor has an additional dimension.
970  if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
971  {
972  throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
973  "than the number of input dimensions.")
974  }
975 
976  // Output shape must be as inferred from the input shape
977  const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
978  for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
979  {
980  if (outputShape[i] != inputShape[i])
981  {
982  throw InvalidArgumentException(descriptorName + ": Output tensor must "
983  "match shape inferred from input tensor.");
984  }
985  }
986 
987  // The stacked dimension itself must have extent equal to the input count.
988  if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
989  {
990  throw InvalidArgumentException(descriptorName + ": Output tensor must "
991  "match shape inferred from input tensor.");
992  }
993 
994  // Dimensions after the axis are shifted up by one relative to the input.
995  for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
996  {
997  if (outputShape[i] != inputShape[i-1])
998  {
999  throw InvalidArgumentException(descriptorName + ": Output tensor must "
1000  "match shape inferred from input tensor.");
1001  }
1002  }
1003 
1004  if (outputShape.GetNumDimensions() > 5)
1005  {
1006  throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
1007  }
1008 
1009  // Check the supported data types
1010  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1011  {
1012  };
1013 
1014  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1015 
1016  // Every further input must share input_0's data type, as must the output.
1017  for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1018  {
1019  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1020  workloadInfo.m_InputTensorInfos[i],
1021  descriptorName,
1022  "input_0",
1023  "input_" + std::to_string(i));
1024  }
1025 
1026  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1027  workloadInfo.m_OutputTensorInfos[0],
1028  descriptorName,
1029  "input_0",
1030  "output");
1031 }
1037 
// Validates a Fill workload: exactly one input and one output; the input (the
// target-shape tensor) must be rank 1, and the output must be of a supported type.
1038 void FillQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1039 {
1040  const std::string descriptorName{"FillQueueDescriptor"};
1041 
1042  ValidateNumInputs(workloadInfo, descriptorName, 1);
1043  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1044 
1045  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1046  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1047 
1048  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1, "input");
1049 
1050  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1051  {
1052  };
1053 
1054  // Only the output type is checked here; the input tensor carries the shape.
1055  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1056 }
1060 
// FullyConnectedQueueDescriptor::Validate — signature line elided in this
// extraction. Validates input/weight/bias/output ranks and types; inputs are
// 2 (input, weights) or 3 when bias is enabled. BFloat16 input may legally pair
// with a Float32 output (optimization path); all other types must match exactly.
1062 {
1063  const std::string descriptorName{"FullyConnectedQueueDescriptor"};
1064 
1065  uint32_t numInputs = 2;
1066  if (m_Parameters.m_BiasEnabled)
1067  {
1068  numInputs = 3; // bias arrives as a third input tensor
1069  }
1070 
1071  ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1072  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1073 
1074  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1075  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1076 
1077  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1078 
1079  if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
1080  {
1081  throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
1082  }
1083 
1084  TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
1085  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
1086 
1087  if (m_Parameters.m_BiasEnabled)
1088  {
1089  TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
1090  // Validates type and quantization values.
1091  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1092  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1093  ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1094  }
1095 
1096  // Check the supported data types
1097  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1098  {
1099  };
1100 
1101  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1102 
1103  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1104  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1105  {
1106  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1107  {
1108  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1109  "for BFloat16 input.");
1110  }
1111  }
1112  else
1113  {
1114  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1115  }
1116 }
1123 
// NormalizationQueueDescriptor::Validate — signature line elided in this
// extraction. Checks one input / one output with matching data types and
// identical shapes, and that the input type is supported.
1125 {
1126  const std::string descriptorName{"NormalizationQueueDescriptor"};
1127 
1128  ValidateNumInputs(workloadInfo, descriptorName, 1);
1129  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1130 
1131  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1132  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1133 
1134  // Check the supported data types
1135  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1136  {
1137  };
1138 
1139  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1140 
1141  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1142 
1143  // Normalization is elementwise over the window — output shape equals input shape.
1144  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1145 }
1151 
// Validates an Addition workload: two inputs, one output, all of one supported
// data type, with input shapes broadcast-compatible to the output shape.
1152 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1153 {
1154  const std::string descriptorName{"AdditionQueueDescriptor"};
1155 
1156  ValidateNumInputs(workloadInfo, descriptorName, 2);
1157  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1158 
1159  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1160  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1161  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1162 
1163  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1164  {
1165  };
1166 
1167  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1168  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1169  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1170 
1171  // Transitively forces input_0 == input_1 == output data type.
1172  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1173  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1174 
1175  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1176  inputTensorInfo1,
1177  outputTensorInfo,
1178  descriptorName,
1179  "input_0",
1180  "input_1");
1181 }
1188 
// MultiplicationQueueDescriptor::Validate — signature line elided in this
// extraction. Mirrors AdditionQueueDescriptor::Validate: two inputs, one
// output, matching supported data types, broadcast-compatible shapes.
1190 {
1191  const std::string descriptorName{"MultiplicationQueueDescriptor"};
1192 
1193  ValidateNumInputs(workloadInfo, descriptorName, 2);
1194  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1195 
1196  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1197  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1198  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1199 
1200  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1201  {
1202  };
1203 
1204  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1205  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1206  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1207 
1208  // Transitively forces input_0 == input_1 == output data type.
1209  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1210  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1211 
1212  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1213  inputTensorInfo1,
1214  outputTensorInfo,
1215  descriptorName,
1216  "input_0",
1217  "input_1");
1218 }
1225 
// BatchNormalizationQueueDescriptor::Validate — signature line elided in this
// extraction. Checks input/output type and shape agreement, then checks the
// four constant parameter tensors (mean, variance, beta, gamma): non-null
// handles, rank 1, and identical shapes to each other.
1227 {
1228  const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
1229 
1230  ValidateNumInputs(workloadInfo, descriptorName, 1);
1231  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1232 
1233  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1234  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1235 
1236  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1237  {
1238  };
1239 
1240  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1241  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1242 
1243  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1244  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1245 
1246  // The four statistics tensors are owned by the descriptor, not workloadInfo.
1247  ValidatePointer(m_Mean, descriptorName, "mean");
1248  ValidatePointer(m_Variance, descriptorName, "variance");
1249  ValidatePointer(m_Beta, descriptorName, "beta");
1250  ValidatePointer(m_Gamma, descriptorName, "gamma");
1251 
1252  const TensorInfo& mean = m_Mean->GetTensorInfo();
1253  const TensorInfo& variance = m_Variance->GetTensorInfo();
1254  const TensorInfo& beta = m_Beta->GetTensorInfo();
1255  const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1256 
1257  ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
1258  ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
1259  ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
1260  ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
1261 
1262  // All four are per-channel vectors, so their shapes must agree.
1263  ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
1264  ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
1265  ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
1266 }
1271 
// Convolution2dQueueDescriptor::Validate — signature line elided in this
// extraction. Validates rank-4 input/weight/output, optional bias (type and
// quantization), positive strides, per-axis quantization consistency, and
// supported data types. BFloat16 input may pair with Float32 output.
1273 {
1274  const std::string descriptorName{"Convolution2dQueueDescriptor"};
1275 
1276  uint32_t numInputs = 2;
1277  if (m_Parameters.m_BiasEnabled)
1278  {
1279  numInputs = 3; // bias arrives as a third input tensor
1280  }
1281 
1282  ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1283  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1284 
1285  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1286  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1287 
1288  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1289  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1290 
1291  const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
1292 
1293  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1294 
1295  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1296 
1297  Optional<TensorInfo> optionalBiasTensorInfo;
1298  if (m_Parameters.m_BiasEnabled)
1299  {
1300  optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
1301  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1302 
1303  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1304  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1305  }
1306 
1307  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1308  {
1309  // NOTE(review): the 'throw InvalidArgumentException(' line is elided in this extraction.
1310  fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1311  "cannot be either negative or 0.",
1312  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1313  }
1314 
1315  ValidatePerAxisQuantization(inputTensorInfo,
1316  outputTensorInfo,
1317  weightTensorInfo,
1318  optionalBiasTensorInfo,
1319  descriptorName);
1320 
1321  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1322  {
1323  };
1324 
1325  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1326 
1327  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1328  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1329  {
1330  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1331  {
1332  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1333  "for BFloat16 input.");
1334  }
1335  }
1336  else
1337  {
1338  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1339  }
1340 }
1348 
// Convolution3dQueueDescriptor::Validate — signature line elided in this
// extraction. The 3D analogue of Convolution2d validation: rank-5 tensors,
// optional bias, positive strides in X/Y/Z, per-axis quantization checks,
// supported types, and matching input/output data types.
1350 {
1351  const std::string descriptorName{"Convolution3dQueueDescriptor"};
1352 
1353  uint32_t numInputs = 2;
1354  if (m_Parameters.m_BiasEnabled)
1355  {
1356  numInputs = 3; // bias arrives as a third input tensor
1357  }
1358  ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1359  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1360 
1361  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1362  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1363 
1364  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1365  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1366 
1367  const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
1368  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
1369 
1370  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1371 
1372  Optional<TensorInfo> optionalBiasTensorInfo;
1373  if (m_Parameters.m_BiasEnabled)
1374  {
1375  optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
1376  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1377 
1378  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1379  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1380  }
1381 
1382  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 || m_Parameters.m_StrideZ <= 0 )
1383  {
1384  // NOTE(review): the 'throw InvalidArgumentException(' line is elided in this extraction.
1385  fmt::format("{}: strideX (provided {}), strideY (provided {}) or strideZ (provided {})"
1386  "cannot be either negative or 0.",
1387  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY, m_Parameters.m_StrideZ));
1388  }
1389 
1390  ValidatePerAxisQuantization(inputTensorInfo,
1391  outputTensorInfo,
1392  weightTensorInfo,
1393  optionalBiasTensorInfo,
1394  descriptorName);
1395 
1396  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1397  {
1398  };
1399 
1400  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1401  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1402 }
1410 
1412 {
1413  const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1414 
1415  uint32_t numInputs = 2;
1416  if (m_Parameters.m_BiasEnabled)
1417  {
1418  numInputs = 3;
1419  }
1420 
1421  ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1422  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1423 
1424  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1425  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1426 
1427  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1428  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1429 
1430  const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
1431  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1432 
1433  if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1434  {
1436  fmt::format("{}: dilationX (provided {}) and dilationY (provided {}) "
1437  "cannot be smaller than 1.",
1438  descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationX));
1439  }
1440 
1441  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1442  {
1444  fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1445  "cannot be either negative or 0.",
1446  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1447  }
1448 
1449  if (weightTensorInfo.GetShape()[0] != 1)
1450  {
1451  throw InvalidArgumentException(fmt::format(
1452  "{0}: The weight format in armnn is expected to be [1, H, W, Cout]."
1453  "But first dimension is not equal to 1. Provided weight shape: [{1}, {2}, {3}, {4}]",
1454  descriptorName,
1455  weightTensorInfo.GetShape()[0],
1456  weightTensorInfo.GetShape()[1],
1457  weightTensorInfo.GetShape()[2],
1458  weightTensorInfo.GetShape()[3]));
1459  }
1460 
1461  const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1462  const unsigned int numWeightOutputChannelsRefFormat = weightTensorInfo.GetShape()[3];
1463  const unsigned int numWeightOutputChannelsAclFormat = weightTensorInfo.GetShape()[1];
1464  const unsigned int numOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1465 
1466  // Weights format has two valid options: [1, H, W, Cout] (CpuRef) or [1, Cout, H, W] (CpuAcc/GpuAcc).
1467  bool validRefFormat = (numWeightOutputChannelsRefFormat == numOutputChannels);
1468  bool validAclFormat = (numWeightOutputChannelsAclFormat == numOutputChannels);
1469 
1470  if (!(validRefFormat || validAclFormat))
1471  {
1472  throw InvalidArgumentException(fmt::format(
1473  "{0}: The weight format in armnn is expected to be [1, H, W, Cout] (CpuRef) or [1, Cout, H, W] "
1474  "(CpuAcc/GpuAcc). But neither the 4th (CpuRef) or 2nd (CpuAcc/GpuAcc) dimension is equal to Cout."
1475  "Cout = {1} Provided weight shape: [{2}, {3}, {4}, {5}]",
1476  descriptorName,
1477  numOutputChannels,
1478  weightTensorInfo.GetShape()[0],
1479  weightTensorInfo.GetShape()[1],
1480  weightTensorInfo.GetShape()[2],
1481  weightTensorInfo.GetShape()[3]));
1482  }
1483 
1484  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1485 
1486  Optional<TensorInfo> optionalBiasTensorInfo;
1487  if (m_Parameters.m_BiasEnabled)
1488  {
1489  optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
1490  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1491 
1492  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1493  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1494  }
1495  ValidatePerAxisQuantization(inputTensorInfo,
1496  outputTensorInfo,
1497  weightTensorInfo,
1498  optionalBiasTensorInfo,
1499  descriptorName);
1500 
1501  std::vector<DataType> supportedTypes =
1502  {
1509  };
1510 
1511  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1512  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1513 }
1514 
1515 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1516 {
1517  const std::string descriptorName{"PermuteQueueDescriptor"};
1518 
1519  ValidateNumInputs(workloadInfo, descriptorName, 1);
1520  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1521 
1522  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1523 
1524  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1525  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1526 
1527  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1528  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1529 
1530  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1531  {
1532  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1533  {
1534  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1535  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1536  "must match dst dimension " + to_string(mapping[i]) +
1537  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1538  }
1539  }
1540 
1541  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1542 }
1543 
// Validates a 2D pooling workload: one rank-4 input, one rank-4 output,
// supported and matching data types.
1544 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1545 {
1546  const std::string descriptorName{"Pooling2dQueueDescriptor"};
1547 
1548  ValidateNumInputs(workloadInfo, descriptorName, 1);
1549  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1550 
1551  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1552  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1553 
1554  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1555  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1556 
1557  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1558  {
1559  };
1560 
1561  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1562  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1563 }
1570 
// Validates a 3D pooling workload: one rank-5 input, one rank-5 output,
// supported and matching data types.
1571 void Pooling3dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1572 {
1573  const std::string descriptorName{"Pooling3dQueueDescriptor"};
1574 
1575  ValidateNumInputs(workloadInfo, descriptorName, 1);
1576  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1577 
1578  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1579  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1580 
1581  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1582  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1583 
1584  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1585  {
1586  };
1587 
1588  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1589  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1590 }
1597 
1598 
// Validates a ResizeBilinear workload: one rank-4 input and output of matching
// supported types; only the spatial dimensions may change, so batch size and
// channel count (located via the descriptor's data layout) must be preserved.
1599 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1600 {
1601  const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1602 
1603  ValidateNumInputs(workloadInfo, descriptorName, 1);
1604  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1605 
1606  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1607  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1608 
1609  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1610  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1611 
1612  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1613  {
1614  };
1615 
1616  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1617  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1618 
1619  // ResizeBilinear only changes width and height: batch and channel count must match.
1620  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1621  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1622  if (inputBatchSize != outputBatchSize)
1623  {
1624  // NOTE(review): the 'throw InvalidArgumentException(' line is elided in this extraction.
1625  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1626  descriptorName, inputBatchSize, outputBatchSize));
1627  }
1628 
1629  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1630  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1631  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1632  if (inputChannelCount != outputChannelCount)
1633  {
1634  // NOTE(review): the 'throw InvalidArgumentException(' line is elided in this extraction.
1635  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1636  descriptorName, inputChannelCount, outputChannelCount));
1637  }
1638 }
1645 
// Validates a Resize workload (same contract as ResizeBilinear): one rank-4
// input and output of matching supported types; batch size and channel count
// must be preserved since only width and height may change.
1646 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1647 {
1648  const std::string descriptorName{"ResizeQueueDescriptor"};
1649 
1650  ValidateNumInputs(workloadInfo, descriptorName, 1);
1651  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1652 
1653  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1654  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1655 
1656  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1657  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1658 
1659  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1660  {
1661  };
1662 
1663  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1664  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1665 
1666  // Resize only changes width and height: batch and channel count must match.
1667  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1668  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1669  if (inputBatchSize != outputBatchSize)
1670  {
1671  // NOTE(review): the 'throw InvalidArgumentException(' line is elided in this extraction.
1672  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1673  descriptorName, inputBatchSize, outputBatchSize));
1674  }
1675 
1676  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1677  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1678  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1679  if (inputChannelCount != outputChannelCount)
1680  {
1681  // NOTE(review): the 'throw InvalidArgumentException(' line is elided in this extraction.
1682  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1683  descriptorName, inputChannelCount, outputChannelCount));
1684  }
1685 }
1692 
// FakeQuantizationQueueDescriptor::Validate — signature line elided in this
// extraction. Checks one rank-2 input and output with identical shapes, and
// that the quantization range is ordered (min <= max).
1694 {
1695  const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1696 
1697  ValidateNumInputs(workloadInfo, descriptorName, 1);
1698  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1699 
1700  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1701  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1702 
1703  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1704  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1705 
1706  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1707 
1708  if (m_Parameters.m_Min > m_Parameters.m_Max)
1709  {
1710  throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1711  }
1712 }
1713 
// InstanceNormalizationQueueDescriptor::Validate — signature line elided in
// this extraction. Checks one input / one output with rank at most 4,
// identical shapes, supported data types, and matching input/output types.
1715 {
1716  const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};
1717 
1718  ValidateNumInputs(workloadInfo, descriptorName, 1);
1719  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1720 
1721  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1722  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1723 
1724  if (inputTensorInfo.GetNumDimensions() > 4)
1725  {
1726  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1727  }
1728 
1729  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1730 
1731  // Check the supported data types
1732  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1733  {
1734  };
1735 
1736  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1737  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1738 }
1742 
// L2NormalizationQueueDescriptor::Validate — signature line elided in this
// extraction. Same contract as InstanceNormalization validation: rank <= 4,
// identical input/output shapes, supported and matching data types.
1744 {
1745  const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1746 
1747  ValidateNumInputs(workloadInfo, descriptorName, 1);
1748  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1749 
1750  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1751  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1752 
1753  if (inputTensorInfo.GetNumDimensions() > 4)
1754  {
1755  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1756  }
1757 
1758  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1759 
1760  // Check the supported data types
1761  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1762  {
1763  };
1764 
1765  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1766  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1767 }
1774 
// Validates a LogSoftmax workload: one input and one output with identical
// shapes, a supported data type, and matching input/output types.
1775 void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1776 {
1777  const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1778 
1779  ValidateNumInputs(workloadInfo, descriptorName, 1);
1780  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1781 
1782  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1783  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1784 
1785  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1786 
1787  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1788  {
1789  };
1790 
1791  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1792  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1793 }
1797 
// Validates a Constant workload: no inputs, one output; the descriptor must
// carry a constant tensor handle (m_LayerOutput) whose shape matches the
// output, and the output type must be supported.
1798 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1799 {
1800  const std::string descriptorName{"ConstantQueueDescriptor"};
1801 
1802  ValidateNumInputs(workloadInfo, descriptorName, 0);
1803  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1804 
1805  if (!m_LayerOutput)
1806  {
1807  throw InvalidArgumentException(descriptorName + ": No const input specified.");
1808  }
1809 
1810  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1811  ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1812 
1813  // Check the supported data types
1814  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1815  {
1816  };
1817 
1818  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1819 }
1828 
// Validates a Reshape workload: one input and one output with the same total
// element count (shapes may differ), a supported data type, and matching
// input/output types.
1829 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1830 {
1831  const std::string descriptorName{"ReshapeQueueDescriptor"};
1832 
1833  ValidateNumInputs(workloadInfo, descriptorName, 1);
1834  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1835 
1836  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1837  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1838 
1839  // Reshape preserves element count, not shape.
1840  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1841 
1842  // Check the supported data types
1843  std::vector<DataType> supportedTypes = // NOTE(review): DataType entries elided in this extraction — confirm against full source
1844  {
1845  };
1846 
1847  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1848  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1849 }
1857 
1859 {
1860  const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1861 
1862  ValidateNumInputs(workloadInfo, descriptorName, 1);
1863  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1864 
1865  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1866  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1867 
1868  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1869  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1870 
1871  if (m_Parameters.m_BlockShape.size() != 2)
1872  {
1873  throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1874  }
1875 
1876  if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1877  {
1878  throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1879  "dimensions as Block Shape.");
1880  }
1881 
1882  const TensorShape& inputShape = inputTensorInfo.GetShape();
1883 
1884  std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1885  std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1886 
1887  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1888 
1889  const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1890  widthPad.first + widthPad.second;
1891  const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1892  heightPad.first + heightPad.second;
1893 
1894  const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1895  inputShape[dimensionIndices.GetChannelsIndex()];
1896  const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1897 
1898  if (numOutputElements != numInputElements)
1899  {
1900  throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1901  to_string(numInputElements) + " after padding but output tensor has " +
1902  to_string(numOutputElements) + " elements.");
1903  }
1904 
1905  if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1906  {
1907  throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1908  "divisible by Block Shape in all spatial dimensions");
1909  }
1910 
1911  std::vector<DataType> supportedTypes =
1912  {
1919  };
1920 
1921  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1922  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1923 }
1924 
// SpaceToDepthQueueDescriptor::Validate — the signature line (original 1925)
// is not visible in this rendered view. Validates a 4D SpaceToDepth workload:
// supported and matching data types, equal element counts, a non-zero block
// size, spatial dimensions divisible by the block size, and an output channel
// count divisible by blockSize^2.
1926 {
1927  const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1928 
1929  ValidateNumInputs(workloadInfo, descriptorName, 1);
1930  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1931 
1932  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1933  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1934 
1935  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1936  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1937 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 1940-1945) — confirm against the repository source.
1938  std::vector<DataType> supportedTypes =
1939  {
1946  };
1947 
1948  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1949  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1950 
1951  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1952 
1953  if (m_Parameters.m_BlockSize == 0)
1954  {
1955  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
1956  }
1957 
// Resolves H/W/C positions for either NCHW or NHWC layouts.
1958  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1959  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1960  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1961  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1962 
1963  const TensorShape& inputShape = inputTensorInfo.GetShape();
1964  if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1965  {
1966  throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1967  "by block size in all spatial dimensions");
1968  }
1969 
1970  const TensorShape& outputShape = outputTensorInfo.GetShape();
1971  if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1972  {
// NOTE(review): the two concatenated literals below are missing a space, so
// the message renders as "...output tensormust be divisible...". Fixing it
// would change a runtime string, so it is only flagged here.
1973  throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
1974  "must be divisible by the square of block size." );
1975  }
1976 }
1977 
// Validates a Floor workload: one input, one output, supported and matching
// data types, identical shapes, and an identical quantization space (floor is
// applied element-wise without rescaling).
1978 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1979 {
1980  const std::string descriptorName{"FloorQueueDescriptor"};
1981 
1982  ValidateNumInputs(workloadInfo, descriptorName, 1);
1983  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1984 
1985  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1986  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1987 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 1990-1993) — confirm against the repository source.
1988  std::vector<DataType> supportedTypes =
1989  {
1994  };
1995 
1996  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1997  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1998  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1999  ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2000 }
2001 
// Validates an LSTM workload against the NNAPI-derived contract: 3 inputs
// (input, outputStateIn, cellStateIn), 4 outputs (scratch, outputStateOut,
// cellStateOut, output), consistent data types throughout, non-negative
// clipping thresholds, correct dimensions for every weight/bias tensor, and
// internally consistent CIFG / peephole / projection / layer-norm
// configurations. Throws InvalidArgumentException on any violation.
2002 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2003 {
2004  // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
2005 
2006  const std::string descriptorName{"LstmQueueDescriptor"};
2007 
2008  // check dimensions of all inputs and outputs
2009  if (workloadInfo.m_InputTensorInfos.size() != 3)
2010  {
2011  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
2012  }
2013  if (workloadInfo.m_OutputTensorInfos.size() != 4)
2014  {
2015  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
2016  }
2017 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2020-2023) — confirm against the repository source.
2018  std::vector<DataType> supportedTypes =
2019  {
2024  };
2025 
2026  // check for supported type of one input and match them with all the other input and output
2027  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
2028 
2029  // type matches all other inputs
2030  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
2031  {
2032  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
2033  workloadInfo.m_InputTensorInfos[i],
2034  descriptorName,
2035  "input_0",
2036  "input_" + std::to_string(i));
2037  }
2038  // type matches all other outputs
2039  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
2040  {
2041  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
2042  workloadInfo.m_OutputTensorInfos[i],
2043  "LstmQueueDescriptor",
2044  "input_0",
2045  "output_" + std::to_string(i));
2046  }
2047 
2048  // Making sure clipping parameters have valid values.
2049  // == 0 means no clipping
2050  // > 0 means clipping
2051  if (m_Parameters.m_ClippingThresCell < 0.0f)
2052  {
2053  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
2054  }
2055  if (m_Parameters.m_ClippingThresProj < 0.0f)
2056  {
2057  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
2058  }
2059 
2060  // Inferring batch size, number of outputs and number of cells from the inputs.
2061  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
2062  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
2063  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
2064  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
2065  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
2066  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
2067 
2068  // input tensor
2069  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
2070  descriptorName + " input_0");
2071  // outputStateInTensor
2072  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
2073  descriptorName + " input_1");
2074  // cellStateInTensor (input_2 has n_batch * n_cell elements)
2075  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
2076  descriptorName + " input_2");
2077  // scratchBufferTensor: 3 gates' worth of cells with CIFG, 4 without
2078  unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
2079  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
2080  descriptorName + " output_0");
2081  // outputStateOutTensor
2082  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
2083  descriptorName + " output_1");
2084  // cellStateOutTensor
2085  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
2086  descriptorName + " output_2");
2087  // outputTensor
2088  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
2089  descriptorName + " output_3");
2090 
2091  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
2092  if ( m_InputToInputWeights )
2093  {
// NOTE(review): the label below reads "InputLayerNormWeights" but the tensor
// being validated is m_InputToInputWeights — this looks like a copy-paste
// error that would mislabel the thrown error message. Fixing it changes a
// runtime string, so it is only flagged here.
2094  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
2095  (n_cell * n_input), "InputLayerNormWeights");
2096  }
2097 
2098  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
2099  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
2100  (n_cell * n_input), "InputToForgetWeights");
2101 
2102  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
2103  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
2104  (n_cell * n_input), "InputToCellWeights");
2105 
2106  if ( m_RecurrentToInputWeights )
2107  {
2108  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
2109  (n_cell * n_output), "RecurrentToInputWeights");
2110  }
2111 
2112  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
2113  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
2114  (n_cell * n_output), "RecurrentToForgetWeights");
2115 
2116  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
2117  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
2118  (n_cell * n_output), "RecurrentToCellWeights");
2119 
2120  // Make sure the input-gate's parameters are either both present (regular
2121  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
2122  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
2123  !m_Parameters.m_CifgEnabled) ||
2124  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
2125  m_Parameters.m_CifgEnabled));
2126  if (!cifg_weights_all_or_none)
2127  {
2128  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
2129  "RecurrentToInputWeights must either both be present (regular LSTM) "
2130  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
2131  "accordingly.");
2132  }
2133 
// Peephole weights are 1-D vectors of n_cell elements when present.
2134  if ( m_CellToInputWeights )
2135  {
2136  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
2137  n_cell, "CellToInputWeights");
2138  }
2139  if ( m_CellToForgetWeights )
2140  {
2141  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
2142  n_cell, "CellToForgetWeights");
2143  }
2144  if ( m_CellToOutputWeights )
2145  {
2146  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
2147  n_cell, "CellToOutputWeights");
2148  }
2149 
2150  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
2151  bool peephole_weights_all_or_none =
2152  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
2153  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
2154  || ( !m_CellToInputWeights && !m_CellToForgetWeights
2155  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
2156  if (!peephole_weights_all_or_none)
2157  {
2158  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
2159  }
2160 
2161  // Make sure the input gate bias is present only when not a CIFG-LSTM.
2162  if (m_Parameters.m_CifgEnabled)
2163  {
2164  if (m_InputGateBias)
2165  {
2166  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
2167  }
2168  }
2169  else
2170  {
2171  if (!m_InputGateBias)
2172  {
2173  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
2174  "must be present.");
2175  }
2176  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
2177  n_cell, "InputGateBias");
2178  }
2179 
// Forget/cell/output gate biases are mandatory 1-D vectors of n_cell elements.
2180  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
2181  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
2182 
2183  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
2184  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
2185 
2186  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
2187  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
2188 
2189  if (m_ProjectionWeights)
2190  {
2191  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
2192  (n_cell * n_output), "ProjectionWeights");
2193  }
2194  if (m_ProjectionBias)
2195  {
2196  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
2197  }
2198 
2199  // Making sure the projection tensors are consistent:
2200  // 1) If projection weight is not present, then projection bias should not be
2201  // present.
2202  // 2) If projection weight is present, then projection bias is optional.
2203  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2204  !m_Parameters.m_ProjectionEnabled)
2205  || (m_ProjectionWeights && !m_ProjectionBias &&
2206  m_Parameters.m_ProjectionEnabled)
2207  || (m_ProjectionWeights && m_ProjectionBias &&
2208  m_Parameters.m_ProjectionEnabled));
2209  if (!projecton_tensors_consistent)
2210  {
2211  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
2212  }
2213 
2214  // The four layer normalization weights either all have values or none of them have values. Additionally, if
2215  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
2216  // either all have values or none of them have values. Layer normalization is used when the values of all the
2217  // layer normalization weights are present
2218  if (m_InputLayerNormWeights)
2219  {
2220  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
2221  }
2222  if (m_ForgetLayerNormWeights)
2223  {
2224  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2225  }
2226  if (m_CellLayerNormWeights)
2227  {
2228  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2229  }
2230  if (m_OutputLayerNormWeights)
2231  {
2232  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2233  }
2234 
2235  if (m_Parameters.m_LayerNormEnabled)
2236  {
2237  if (!m_Parameters.m_CifgEnabled)
2238  {
2239  if (!m_InputLayerNormWeights)
2240  {
2241  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
2242  "disabled but InputLayerNormWeights are not present");
2243  }
2244  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2245  1, n_cell, "InputLayerNormWeights");
2246  }
2247  else if (m_InputLayerNormWeights)
2248  {
2249  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
2250  "enabled");
2251  }
2252 
2253  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2254  "ForgetLayerNormWeights");
2255  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2256 
2257  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2258  "OutputLayerNormWeights");
2259  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2260 
2261  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2262  "CellLayerNormWeights");
2263  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2264  }
2265  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2266  {
2267  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2268  "normalisation weights are present.");
2269  }
2270 }
2271 
2273 {
2274  const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
2275 
2276  ValidateNumInputs(workloadInfo, descriptorName, 1);
2277  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2278 
2279  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2280  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2281 
2282  if (inputTensorInfo.GetDataType() != DataType::BFloat16)
2283  {
2284  throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
2285  }
2286 
2287  if (outputTensorInfo.GetDataType() != DataType::Float32)
2288  {
2289  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2290  }
2291 
2292  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2293 }
2294 
2296 {
2297  const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
2298 
2299  ValidateNumInputs(workloadInfo, descriptorName, 1);
2300  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2301 
2302  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2303  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2304 
2305  if (inputTensorInfo.GetDataType() != DataType::Float32)
2306  {
2307  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2308  }
2309 
2310  if (outputTensorInfo.GetDataType() != DataType::BFloat16)
2311  {
2312  throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
2313  }
2314 
2315  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2316 }
2317 
2319 {
2320  const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
2321 
2322  ValidateNumInputs(workloadInfo, descriptorName, 1);
2323  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2324 
2325  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2326  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2327 
2328  if (inputTensorInfo.GetDataType() != DataType::Float32)
2329  {
2330  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2331  }
2332 
2333  if (outputTensorInfo.GetDataType() != DataType::Float16)
2334  {
2335  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
2336  }
2337 
2338  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2339 }
2340 
2342 {
2343  const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
2344 
2345  ValidateNumInputs(workloadInfo, descriptorName, 1);
2346  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2347 
2348  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2349  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2350 
2351  if (inputTensorInfo.GetDataType() != DataType::Float16)
2352  {
2353  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
2354  }
2355 
2356  if (outputTensorInfo.GetDataType() != DataType::Float32)
2357  {
2358  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2359  }
2360 
2361  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2362 }
2363 
// Validates a Division workload: two inputs and one output, all drawn from
// the supported type list, with broadcast-compatible input shapes producing
// the output shape.
2364 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2365 {
2366  const std::string descriptorName{"DivisionQueueDescriptor"};
2367 
2368  ValidateNumInputs(workloadInfo, descriptorName, 2);
2369  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2370 
2371  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2372  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2373  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2374 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2377-2383) — confirm against the repository source.
2375  std::vector<DataType> supportedTypes =
2376  {
2384  };
2385 
2386  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2387  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2388  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2389 
2390  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2391  inputTensorInfo1,
2392  outputTensorInfo,
2393  descriptorName,
2394  "input_0",
2395  "input_1");
2396 }
2397 
// SubtractionQueueDescriptor::Validate — the signature line (original 2398)
// is not visible in this rendered view. Validates a Subtraction workload:
// two inputs and one output, all of supported types, with broadcast-compatible
// input shapes producing the output shape.
2399 {
2400  const std::string descriptorName{"SubtractionQueueDescriptor"};
2401 
2402  ValidateNumInputs(workloadInfo, descriptorName, 2);
2403  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2404 
2405  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2406  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2407  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2408 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2411-2417) — confirm against the repository source.
2409  std::vector<DataType> supportedTypes =
2410  {
2418  };
2419 
2420  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2421  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2422  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2423 
2424  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2425  inputTensorInfo1,
2426  outputTensorInfo,
2427  descriptorName,
2428  "input_0",
2429  "input_1");
2430 }
2431 
// Validates a Maximum workload: two inputs and one output, all of supported
// types, with broadcast-compatible input shapes producing the output shape.
2432 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2433 {
2434  const std::string descriptorName{"MaximumQueueDescriptor"};
2435 
2436  ValidateNumInputs(workloadInfo, descriptorName, 2);
2437  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2438 
2439  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2440  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2441  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2442 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2445-2451) — confirm against the repository source.
2443  std::vector<DataType> supportedTypes =
2444  {
2452  };
2453 
2454  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2455  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2456  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2457 
2458  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2459  inputTensorInfo1,
2460  outputTensorInfo,
2461  descriptorName,
2462  "input_0",
2463  "input_1");
2464 }
2465 
// Validates a Mean (reduction) workload: one input, one output, supported and
// matching types, and an output rank consistent with the KeepDims/Axis
// parameters (rank preserved with KeepDims, reduced to 1 when all axes are
// reduced, otherwise input rank minus the number of reduced axes).
2466 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2467 {
2468  const std::string descriptorName{"MeanQueueDescriptor"};
2469 
2470  ValidateNumInputs(workloadInfo, descriptorName, 1);
2471  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2472 
2473  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2474  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2475 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2478-2483) — confirm against the repository source.
2476  std::vector<DataType> supportedTypes =
2477  {
2484  };
2485 
2486  // First check if input tensor data type is supported, then
2487  // check if this data type matches the output tensor data type
2488  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2489  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2490 
2491  if (m_Parameters.m_KeepDims)
2492  {
// Reduced axes are kept as size-1 dims, so the rank is unchanged.
2493  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2494  }
2495  else if (m_Parameters.m_Axis.empty())
2496  {
// An empty axis list means reduce over everything -> rank-1 result.
2497  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
2498  }
2499  else
2500  {
// Each reduced axis removes one dimension; clamp to at least rank 1.
2501  unsigned int outputDim =
2502  inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
2503  ValidateTensorNumDimensions(outputTensorInfo,
2504  descriptorName,
2505  outputDim > 0 ? outputDim : 1,
2506  "output");
2507  }
2508 }
2509 
2510 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2511 {
2512  const std::string descriptorName{"PadQueueDescriptor"};
2513 
2514  ValidateNumInputs(workloadInfo, descriptorName, 1);
2515  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2516 
2517  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2518  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2519 
2520  // input and output should have the same number of dimensions
2521  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2522 
2523  // there should be entry in the pad list for each dimension in the input tensor
2524  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2525  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2526  "as there are dimensions in the input tensor that is " +
2527  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2528  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2529  }
2530 }
2531 
// Validates a Quantize workload: one input of a supported type and one output
// that must use a quantized data type.
2532 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2533 {
2534  const std::string descriptorName{"QuantizeQueueDescriptor"};
2535 
2536  ValidateNumInputs(workloadInfo, descriptorName, 1);
2537  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2538 
2539  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2540  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2541 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2544-2550) — confirm against the repository source.
2542  std::vector<DataType> supportedTypes =
2543  {
2551  };
2552 
2553  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2554 
// The output carries the quantized representation, so any non-quantized
// output type is a configuration error.
2555  if (!IsQuantizedType(outputTensorInfo.GetDataType()))
2556  {
2557  throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
2558  }
2559 }
2560 
// BatchToSpaceNdQueueDescriptor::Validate — the signature line (original 2561)
// is not visible in this rendered view. Validates a BatchToSpaceNd workload:
// one input, one output, a supported input type matching the output type.
2562 {
2563  const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
2564 
2565  ValidateNumInputs(workloadInfo, descriptorName, 1);
2566  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2567 
2568  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2569  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2570 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2573-2578) — confirm against the repository source.
2571  std::vector<DataType> supportedTypes =
2572  {
2579  };
2580 
2581  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2582  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2583 }
2584 
// StridedSliceQueueDescriptor::Validate — the signature line (original 2585)
// is not visible in this rendered view. Validates a StridedSlice workload:
// one input/output pair of supported, matching types and quantization,
// input rank at most 4, begin/end/stride vectors each of length rank, and
// all stride entries non-zero.
2586 {
2587  const std::string descriptorName{"StridedSliceQueueDescriptor"};
2588 
2589  ValidateNumInputs(workloadInfo, descriptorName, 1);
2590  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2591 
2592  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2593  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2594 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2597-2602) — confirm against the repository source.
2595  std::vector<DataType> supportedTypes =
2596  {
2603  };
2604 
2605  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2606  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2607 
2608  ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2609 
2610  const uint32_t rank = inputTensorInfo.GetNumDimensions();
2611  if (rank > 4)
2612  {
2613  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
2614  }
2615 
2616  // Begin, End & Stride length must be of rank(input0)
2617  if (m_Parameters.m_Begin.size() != rank)
2618  {
2619  throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
2620  }
2621 
2622  if (m_Parameters.m_End.size() != rank)
2623  {
2624  throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
2625  }
2626 
2627  if (m_Parameters.m_Stride.size() != rank)
2628  {
2629  throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
2630  }
2631 
2632  // Stride entries must be non-zero
// A zero stride would make the slice iteration never advance.
2633  for (auto& stride : m_Parameters.m_Stride)
2634  {
2635  if (stride == 0)
2636  {
2637  throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2638  }
2639  }
2640 }
2641 
// Validates a Minimum workload: two inputs and one output, all of supported
// types, with broadcast-compatible input shapes producing the output shape.
2642 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2643 {
2644  const std::string descriptorName{"MinimumQueueDescriptor"};
2645 
2646  ValidateNumInputs(workloadInfo, descriptorName, 2);
2647  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2648 
2649  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2650  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2651  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2652 
// NOTE(review): the initializer contents are elided in this rendered view
// (original lines 2655-2661) — confirm against the repository source.
2653  std::vector<DataType> supportedTypes =
2654  {
2662  };
2663 
2664  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2665  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2666  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2667 
2668  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2669  inputTensorInfo1,
2670  outputTensorInfo,
2671  descriptorName,
2672  "input_0",
2673  "input_1");
2674 }
2675 
2676 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2677 {
2678  const std::string descriptorName{"DebugQueueDescriptor"};
2679 
2680  ValidateNumInputs(workloadInfo, descriptorName, 1);
2681  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2682 }
2683 
2684 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2685 {
2686  const std::string descriptorName{"EqualQueueDescriptor"};
2687 
2688  ValidateNumInputs(workloadInfo, descriptorName, 2);
2689  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2690 
2691  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2692  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2693  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2694 
2695  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2696  inputTensorInfo1,
2697  outputTensorInfo,
2698  descriptorName,
2699  "input_0",
2700  "input_1");
2701 
2702  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2703  {
2704  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2705  }
2706 }
2707 
2708 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2709 {
2710  const std::string descriptorName{"GreaterQueueDescriptor"};
2711 
2712  ValidateNumInputs(workloadInfo, descriptorName, 2);
2713  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2714 
2715  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2716  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2717  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2718 
2719  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2720  inputTensorInfo1,
2721  outputTensorInfo,
2722  descriptorName,
2723  "input_0",
2724  "input_1");
2725 
2726  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2727  {
2728  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2729  }
2730 }
2731 
// Validates the workload for the element-wise reciprocal-square-root layer:
// one input and one output with identical shape and data type.
void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"RsqrtQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Element-wise op: output shape must equal input shape exactly.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2757 
// Validates the workload for the GatherND layer: a params input, an Int32
// indices input, and one output of the same data type as the params.
void GatherNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GatherNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // Indices (second input) must always be Signed32.
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    if (indicesTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
    }

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // NOTE(review): outputDim is taken from the output tensor itself and then
    // compared against that same tensor, so this check can never fail. The
    // Gather variant below derives the expected rank from input/indices —
    // confirm whether this was the intent here as well.
    unsigned int outputDim = outputTensorInfo.GetNumDimensions();
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
2792 
// Validates the workload for the Gather layer: a params input, an Int32
// indices input, and one output whose rank equals
// rank(params) + rank(indices) - 1 (the gathered axis is replaced by the
// indices' dimensions).
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GatherQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // Indices (second input) must always be Signed32.
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    if (indicesTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
    }

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Expected output rank: one input axis is consumed and replaced by all of
    // the indices' axes.
    unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
2827 
{
    // DetectionPostProcess validation (the signature line is not visible in this
    // excerpt): two inputs (box encodings, scores), exactly four outputs
    // (detection boxes, classes, scores, number of detections), plus a
    // mandatory Anchors constant tensor.
    const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);

    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    // Anchors are supplied as a constant tensor handle, not a workload input.
    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();

    const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];

    ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    const std::vector<DataType> supportedInputTypes =
    {
    };

    ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);

    ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");

    // NOTE: Output is always Float32 regardless of input type
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");

    // IoU threshold for non-max suppression must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // Scores carry an implicit background class, hence NumClasses + 1 columns.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException(descriptorName + ": Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
2895 
// Validates the workload for the Dequantize layer: one quantized input and one
// floating-point output.
void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"DequantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the input supported-type entries are not visible in this
    // excerpt — confirm against the full source.
    std::vector<DataType> inputSupportedTypes =
    {
    };
    ValidateDataTypes(inputTensorInfo, inputSupportedTypes, descriptorName);

    // NOTE(review): only Float16 is visible here; any preceding entries are
    // elided in this excerpt — confirm against the full source.
    std::vector<DataType> outputSupportedTypes =
    {
        DataType::Float16
    };

    ValidateDataTypes(outputTensorInfo, outputSupportedTypes, descriptorName);
}
2925 
2926 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2927 {
2928  const std::string& descriptorName{"MergeQueueDescriptor"};
2929 
2930  ValidateNumInputs(workloadInfo, descriptorName, 2);
2931  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2932 
2933  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2934  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2935  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2936 
2937  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2938  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2939 
2940  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2941  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2942 }
2943 
// Validates the workload for the Shape layer: one input of any supported type,
// one Signed32 output (the shape is emitted as a vector of Int32 extents).
void ShapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"ShapeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    // Output is always Signed32, independent of the input type.
    ValidateDataTypes(outputTensorInfo, {DataType::Signed32}, descriptorName);
}
2970 
// Validates the workload for the Switch layer: two inputs and two outputs,
// where both outputs take the shape of the first input.
void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"SwitchQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];

    const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);

    // Both outputs mirror the shape of input 0 (input 1's shape is only
    // type-checked above, not shape-checked).
    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo0,
                              descriptorName,
                              "input_0",
                              "output_0");

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo1,
                              descriptorName,
                              "input_0",
                              "output_1");
}
3011 
3012 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
3013 {
3014  // This is internally generated so it should not need validation.
3015 }
3016 
// Validates the workload for the PReLU layer: an input tensor and an alpha
// (per-element slope) tensor that must broadcast together to the output shape,
// all sharing one data type.
void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"PreluQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
    // NOTE(review): "ouptut" below is a typo in the tensor-name argument used in
    // error messages; fixing it changes the emitted message text — flagged only.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");

    // Input and alpha must broadcast together to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo,
                                       alphaTensorInfo,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input",
                                       "alpha");
}
3053 
{
    // TransposeConvolution2d validation (the signature line is not visible in
    // this excerpt): one 4D input, one 4D output, a mandatory 4D weight tensor
    // and an optional bias.
    const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Weights are mandatory and supplied as a constant tensor handle.
    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        // Bias is only required when the descriptor enables it.
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        // Bias type is derived from the input type (e.g. Signed32 for
        // quantized inputs — see GetBiasDataType at the top of this file).
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    // NOTE(review): the supported-type entries are not visible in this excerpt —
    // confirm the list against the full source before relying on it.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3105 
3106 void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3107 {
3108  const std::string descriptorName{"TransposeQueueDescriptor"};
3109 
3110  ValidateNumInputs(workloadInfo, descriptorName, 1);
3111  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3112 
3113  const PermutationVector& mapping = m_Parameters.m_DimMappings;
3114 
3115  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3116  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3117 
3118  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
3119  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
3120 
3121  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
3122  {
3123  if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
3124  {
3125  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
3126  " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
3127  "must match dst dimension " + to_string(i) +
3128  " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
3129  }
3130  }
3131 
3132  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3133 }
3134 
{
    // NOTE(review): the signature line of this Validate overload is not visible
    // in this excerpt, and descriptorName reads "TransposeQueueDescriptor"
    // even though TransposeQueueDescriptor::Validate is defined immediately
    // above — this looks like a copy-pasted name; confirm the owning
    // descriptor and correct the string if so (changes the error-message text).
    const std::string descriptorName{"TransposeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Only the data types are checked to match; shapes are not compared here.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3147 
// Validates the workload for the quantized LSTM (QLSTM) layer: three inputs
// (input, outputStateIn, cellStateIn), three outputs (outputStateOut,
// cellStateOut, output), mandatory gate weights/biases, and optional CIFG,
// peephole, layer-norm and projection parameter sets whose presence must be
// consistent with the corresponding descriptor flags.
void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QLstmQueueDescriptor"};

    // Validate number of inputs/outputs
    ValidateNumInputs(workloadInfo, descriptorName, 3);
    ValidateNumOutputs(workloadInfo, descriptorName, 3);

    // Input/output tensor info
    auto inputInfo = workloadInfo.m_InputTensorInfos[0];
    auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
    auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];

    auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
    auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
    auto outputInfo = workloadInfo.m_OutputTensorInfos[2];

    // Supported types for various tensors in QLSTM
    // NOTE(review): the entries of the five supported-type lists below are not
    // visible in this excerpt — confirm them against the full source.
    std::vector<DataType> inputOutputSupportedTypes =
    {
    };

    std::vector<DataType> cellStateSupportedTypes =
    {
    };

    std::vector<DataType> weightsSupportedTypes =
    {
    };

    std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
    {
    };

    std::vector<DataType> biasSupportedTypes =
    {
    };

    // Validate types of input/output tensors
    ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);

    ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);

    // Validate matching types of input/output tensors
    ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
                                 "outputStateIn", "outputStateOut");
    ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");

    // Infer number of batches, number of units, input size and output size from tensor dimensions
    const uint32_t numBatches = inputInfo.GetShape()[0];
    const uint32_t inputSize = inputInfo.GetShape()[1];
    const uint32_t outputSize = outputStateInInfo.GetShape()[1];
    const uint32_t numUnits = cellStateInInfo.GetShape()[1];

    // Validate number of dimensions and number of elements for input/output tensors
    ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
    ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
    ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");

    ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
    ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
    ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");

    // Validate number of dimensions and number of elements for MANDATORY weight tensors
    ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
    auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
    auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");

    ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
    auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");

    ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
    auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
                                " RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
    auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");

    ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
    auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
    // NOTE(review): the label below says " RecurrentToCellWeights" but the
    // tensor being checked is RecurrentToOutputWeights — looks like a
    // copy-paste slip in the error label; fixing it changes the message text.
    ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");

    // Validate data types for MANDATORY weights tensors (all should match each other)
    ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "inputToCellWeights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "inputToOutputWeights");

    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToForgeteights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToCellWeights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToOutputWeights");

    // Validate number of dimensions and number of elements for MANDATORY bias tensors
    ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
    auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");

    ValidatePointer(m_CellBias, descriptorName, "CellBias");
    auto cellBiasInfo = m_CellBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");

    ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
    auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");

    // Validate data types for MANDATORY bias tensors
    ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
                                 "forgetGateBias", "cellBias");
    ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
                                 "forgetGateBias", "outputGateBias");

    // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
    // All three CIFG tensors must be present iff CIFG is disabled.
    const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
                                            !m_Parameters.m_CifgEnabled) ||
                                            (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
                                            !m_InputGateBias && m_Parameters.m_CifgEnabled));

    if (!allCifgParamsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
            "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
            "set appropriately.");
    }

    if (!m_Parameters.m_CifgEnabled)
    {
        // Validate number of dimensions and number of elements
        auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");

        auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
                                    " RecurrentToInputWeights");

        auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
        ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");

        // Validate data types
        ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
                                     "inputToForgetWeights", "inputToInputWeights");
        ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
                                     "inputToForgetWeights", "recurrentToInputWeights");
        ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
                                     "forgetGateBias", "inputGateBias");
    }

    // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
    // CellToInputWeights is only required when peephole is on AND CIFG is off.
    bool allPeepholeWeightsPresentOrNot =
        (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
        && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
        || (!m_CellToInputWeights && !m_CellToForgetWeights
        && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));

    if (!allPeepholeWeightsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
            "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
            "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
            "appropriately.");
    }

    if (m_Parameters.m_PeepholeEnabled)
    {
        auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
        ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);

        auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
        ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
                                     "cellToForgetWeight", "cellToOutputWeights");

        if (!m_Parameters.m_CifgEnabled)
        {
            auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
            ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
            ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
                                         "cellToForgetWeights", "cellToInputWeights");
        }
    }

    // Validate OPTIONAL params: Layer Norm Weights
    // InputLayerNormWeights is only required when layer norm is on AND CIFG is off.
    bool allLayerNormWeightsPresentOrNot =
        (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
        && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
        || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
        && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));

    if (!allLayerNormWeightsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights "
            "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
            "be present at all (Layer Norm disabled). InputLayerNormWeights should "
            "only be present when Layer Norm is enabled and CIFG is disabled. "
            "m_Parameters.m_LayerNormEnabled should be set appropriately.");
    }

    if (m_Parameters.m_LayerNormEnabled)
    {
        auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
        ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);

        auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
        ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
                                     "forgetLayerNormWeights", "cellLayerNormWeights");

        auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
        ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
                                     "forgetLayerNormWeights", "outputLayerNormWeights");

        if (!m_Parameters.m_CifgEnabled)
        {
            auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
            ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
            ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
                                         "forgetLayerNormWeights", "inputLayerNormWeights");
        }
    }

    // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
    // If projection is enabled, weights are mandatory and the bias is optional.
    bool correctProjectionTensorsPresent =
        ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
         (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
         (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));

    if (!correctProjectionTensorsPresent)
    {
        throw InvalidArgumentException(descriptorName +
            ": If projection is enabled, ProjectionWeights should be present and "
            "ProjectionBias is optional. If projection is disabled, neither "
            "ProjectionWeights nor ProjectionBias should be present.");
    }

    if (m_Parameters.m_ProjectionEnabled)
    {
        auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
        ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);

        if (m_ProjectionBias)
        {
            auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
            ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
            ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
        }

    }
    // NOTE(review): the message says the output quantization "should match"
    // both HiddenStateScale and HiddenStateZeroPoint, but the && below only
    // throws when BOTH differ — an || would reject a single mismatch too.
    // Confirm the intended semantics before changing it.
    else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
              outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
        throw InvalidArgumentException(descriptorName +
                                       ": If projection is disabled, output quantization info (scale, offset) "
                                       "should match HiddenStateScale and HiddenStateZeroPoint.");
    }

}
3433 
3435 {
3436  const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
3437 
3438  // Validate number of inputs/outputs
3439  ValidateNumInputs(workloadInfo, descriptorName, 3);
3440  ValidateNumOutputs(workloadInfo, descriptorName, 2);
3441 
3442  // Input/output tensor infos
3443  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3444  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
3445  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
3446 
3447  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3448  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3449 
3450  std::vector<DataType> inputOutputSupportedTypes =
3451  {
3453  };
3454 
3455  std::vector<DataType> cellStateSupportedTypes =
3456  {
3458  };
3459 
3460  std::vector<DataType> weightsSupportedTypes =
3461  {
3463  };
3464 
3465  std::vector<DataType> biasSupportedTypes =
3466  {
3468  };
3469 
3470  // Validate types of input/output tensors
3471  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3472  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3473  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3474 
3475  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3476  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3477 
3478  // Validate matching types of input/output tensors
3479  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3480  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3481  "outputStateIn", "outputStateOut");
3482  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3483 
3484  // Validate matching quantization info for input/output tensors
3485  ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3486  ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
3487  ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3488 
3489  // Infer number of batches, input size and output size from tensor dimensions
3490  const uint32_t numBatches = inputInfo.GetShape()[0];
3491  const uint32_t inputSize = inputInfo.GetShape()[1];
3492  const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3493 
3494  // Validate number of dimensions and number of elements for input/output tensors
3495  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3496  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
3497  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3498  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
3499  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3500 
3501  // Validate number of dimensions and number of elements for weights tensors
3502  ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
3503  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3504  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
3505 
3506  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3507  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3508  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
3509 
3510  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3511  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3512  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
3513 
3514  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3515  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3516  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
3517 
3518  ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
3519  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3520  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
3521 
3522  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3523  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3524  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3525  " RecurrentToForgetWeights");
3526 
3527  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3528  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3529  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3530 
3531  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3532  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3533  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3534 
3535  // Validate data types for weights tensors (all should match each other)
3536  ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3537 
3538  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3539  "inputToInputWeights", "inputToForgetWeights");
3540  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3541  "inputToInputWeights", "inputToCellWeights");
3542  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3543  "inputToInputWeights", "inputToOutputWeights");
3544 
3545  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3546  "inputToInputWeights", "recurrentToInputWeights");
3547  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3548  "inputToInputWeights", "recurrentToForgeteights");
3549  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3550  "inputToInputWeights", "recurrentToCellWeights");
3551  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3552  "inputToInputWeights", "recurrentToOutputWeights");
3553 
3554  // Validate matching quantization info for weight tensors (all should match each other)
3555  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3556  descriptorName, "inputToInputWeights", "inputToForgetWeights");
3557  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3558  descriptorName, "inputToInputWeights", "inputToCellWeights");
3559  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3560  descriptorName, "inputToInputWeights", "inputToOutputWeights");
3561 
3562  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3563  descriptorName, "inputToInputWeights", "recurrentToInputWeights");
3564  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3565  descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
3566  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3567  descriptorName, "inputToInputWeights", "recurrentToCellWeights");
3568  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3569  descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
3570 
3571  // Validate number of dimensions and number of elements in bias tensors
3572  ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
3573  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3574  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
3575 
3576  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3577  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3578  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
3579 
3580  ValidatePointer(m_CellBias, descriptorName, "CellBias");
3581  auto cellBiasInfo = m_CellBias->GetTensorInfo();
3582  ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
3583 
3584  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3585  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3586  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
3587 
3588  // Validate data types for bias tensors (all should match each other)
3589  ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3590 
3591  ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3592  "inputGateBias", "forgetGateBias");
3593  ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3594  "inputGateBias", "cellBias");
3595  ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3596  "inputGateBias", "outputGateBias");
3597 
3598  // Validate bias tensor quantization info
3599  ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3600  ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3601  ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3602  ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3603 }
3604 
3605 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3606 {
3607  const std::string descriptorName{"AbsQueueDescriptor"};
3608 
3609  ValidateNumInputs(workloadInfo, descriptorName, 1);
3610  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3611 
3612  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3613  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3614 
3615  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3616 
3617  std::vector<DataType> supportedTypes =
3618  {
3626  };
3627 
3628  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3629  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3630 }
3631 
3632 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3633 {
3634  const std::string descriptorName{"SliceQueueDescriptor"};
3635 
3636  ValidateNumInputs(workloadInfo, descriptorName, 1);
3637  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3638 
3639  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3640  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3641 
3642  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3643 
3644  const unsigned int rank = inputTensorInfo.GetNumDimensions();
3645  if (rank > 4)
3646  {
3647  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3648  }
3649 
3650  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3651 
3652  // Check if m_Begin and m_Size have the expected length
3653  if (m_Parameters.m_Begin.size() != rank)
3654  {
3655  throw InvalidArgumentException(descriptorName +
3656  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3657  }
3658  if (m_Parameters.m_Size.size() != rank)
3659  {
3660  throw InvalidArgumentException(descriptorName +
3661  ": Length of size descriptor must equal rank " + std::to_string(rank));
3662  }
3663 
3664  // Check if the shape of the output tensor matches m_Size
3665  const TensorShape& outputShape = outputTensorInfo.GetShape();
3666  for (unsigned int i = 0u; i < rank; ++i)
3667  {
3668  if (m_Parameters.m_Size[i] != outputShape[i])
3669  {
3670  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3671  }
3672  }
3673 
3674  // Check if the sum of begin offset and size in a given dimension
3675  // does not exceed the size of corresponding input
3676  const TensorShape& inputShape = inputTensorInfo.GetShape();
3677  for(unsigned int i = 0u; i < rank; ++i)
3678  {
3679  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3680  {
3681  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3682  std::to_string(i) + " exceeds input size.");
3683  }
3684  }
3685 }
3686 
3688 {
3689  const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3690 
3691  ValidateNumInputs(workloadInfo, descriptorName, 1);
3692  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3693 
3694  const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3695  const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3696 
3697  ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3698  ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3699 
3700  std::vector<DataType> supportedTypes =
3701  {
3708  };
3709 
3710  ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3711  ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3712 
3713  ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3714 
3715  if (m_Parameters.m_BlockSize == 0)
3716  {
3717  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3718  }
3719 
3720  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3721  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3722  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3723  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3724 
3725  const TensorShape& outputShape = outputInfo.GetShape();
3726  if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3727  {
3728  throw InvalidArgumentException(descriptorName + ": Output width and height shape"
3729  "must be divisible by block size.");
3730  }
3731 
3732  const TensorShape& inputShape = inputInfo.GetShape();
3733  if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3734  {
3735  throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
3736  "must be divisible by the square of block size." );
3737  }
3738 }
3739 
3740 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3741 {
3742  const std::string descriptorName{"ComparisonQueueDescriptor"};
3743 
3744  ValidateNumInputs(workloadInfo, descriptorName, 2);
3745  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3746 
3747  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3748  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3749  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3750 
3751  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3752  inputTensorInfo1,
3753  outputTensorInfo,
3754  descriptorName,
3755  "input_0",
3756  "input_1");
3757 
3758  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3759  {
3760  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3761  }
3762 }
3763 
3765 {
3766  const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3767 
3768  ValidateNumInputs(workloadInfo, descriptorName, 1);
3769  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3770 
3771  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3772  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3773 
3774  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3775 
3776  std::vector<DataType> supportedTypes =
3777  {
3785  };
3786 
3787  std::vector<DataType> logicalSupportedTypes =
3788  {
3790  };
3791 
3792  if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
3793  {
3794  ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
3795  }
3796  else
3797  {
3798  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3799  }
3800 
3801 
3802  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3803 }
3804 
3805 void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3806 {
3807  const std::string descriptorName{"RankQueueDescriptor"};
3808 
3809  ValidateNumInputs(workloadInfo, descriptorName, 1);
3810  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3811 
3812  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3813  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3814 
3815  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
3816  ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
3817 
3818  std::vector<DataType> supportedTypes =
3819  {
3828  };
3829 
3830  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3831  ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
3832 }
3833 
3835 {
3836  const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
3837 
3838  ValidateNumInputs(workloadInfo, descriptorName, 2);
3839  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3840 
3841  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3842  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3843  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3844 
3845  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3846  inputTensorInfo1,
3847  outputTensorInfo,
3848  descriptorName,
3849  "input_0",
3850  "input_1");
3851 
3852  if (inputTensorInfo0.GetDataType() != DataType::Boolean)
3853  {
3854  throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
3855  }
3856 
3857  if (inputTensorInfo1.GetDataType() != DataType::Boolean)
3858  {
3859  throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
3860  }
3861 
3862  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3863  {
3864  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3865  }
3866 }
3867 
3868 void ReduceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3869 {
3870  const std::string descriptorName{"ReduceQueueDescriptor"};
3871 
3872  ValidateNumInputs(workloadInfo, descriptorName, 1);
3873  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3874 
3875  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3876  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3877 
3878  std::vector<DataType> supportedTypes =
3879  {
3887  };
3888 
3889  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3890  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3891 }
3892 
3894 {
3895  // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm
3896 
3897  const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};
3898 
3899  // check dimensions of all inputs and outputs
3900  if (workloadInfo.m_InputTensorInfos.size() != 3)
3901  {
3902  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
3903  }
3904  if (workloadInfo.m_OutputTensorInfos.size() != 3)
3905  {
3906  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
3907  }
3908 
3909  std::vector<DataType> supportedTypes =
3910  {
3913  };
3914 
3915  // check for supported type of one input and match them with all the other input and output
3916  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
3917 
3918  // Making sure clipping parameters have valid values.
3919  // == 0 means no clipping
3920  // > 0 means clipping
3921  if (m_Parameters.m_ClippingThresCell < 0.0f)
3922  {
3923  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
3924  }
3925  if (m_Parameters.m_ClippingThresProj < 0.0f)
3926  {
3927  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
3928  }
3929 
3930  unsigned int batchIndx = 0;
3931  unsigned int inputIndx = 1;
3932  uint32_t timeStep = 1;
3933  unsigned int timeIndx = 1;
3934  inputIndx = 2;
3935  if (m_Parameters.m_TimeMajor)
3936  {
3937  batchIndx = 1;
3938  timeIndx = 0;
3939 
3940  }
3941  timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];
3942 
3943  // Inferring batch size, number of outputs and number of cells from the inputs.
3944  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
3945  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
3946  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
3947  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
3948  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
3949  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
3950 
3951  // input tensor
3952  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
3953  descriptorName + " input_0");
3954  // outputStateInTensor
3955  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
3956  descriptorName + " input_1");
3957  // outputStateInTensor
3958  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
3959  descriptorName + " input_2");
3960 
3961  // outputTensor
3962  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 3, (timeStep * n_batch * n_output),
3963  descriptorName + " output_0");
3964 
3965  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
3966  if ( m_InputToInputWeights )
3967  {
3968  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
3969  (n_cell * n_input), "InputLayerNormWeights");
3970  }
3971 
3972  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
3973  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
3974  (n_cell * n_input), "InputToForgetWeights");
3975 
3976  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
3977  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
3978  (n_cell * n_input), "InputToCellWeights");
3979 
3980  if ( m_RecurrentToInputWeights )
3981  {
3982  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
3983  (n_cell * n_output), "RecurrentToInputWeights");
3984  }
3985 
3986  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
3987  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
3988  (n_cell * n_output), "RecurrentToForgetWeights");
3989 
3990  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
3991  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
3992  (n_cell * n_output), "RecurrentToCellWeights");
3993 
3994  // Make sure the input-gate's parameters are either both present (regular
3995  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
3996  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
3997  !m_Parameters.m_CifgEnabled) ||
3998  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3999  m_Parameters.m_CifgEnabled));
4000  if (!cifg_weights_all_or_none)
4001  {
4002  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
4003  "RecurrentToInputWeights must either both be present (regular LSTM) "
4004  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
4005  "accordingly.");
4006  }
4007 
4008  if ( m_CellToInputWeights )
4009  {
4010  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
4011  n_cell, "CellToInputWeights");
4012  }
4013  if ( m_CellToForgetWeights )
4014  {
4015  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
4016  n_cell, "CellToForgetWeights");
4017  }
4018  if ( m_CellToOutputWeights )
4019  {
4020  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
4021  n_cell, "CellToOutputWeights");
4022  }
4023 
4024  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
4025  bool peephole_weights_all_or_none =
4026  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
4027  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
4028  || ( !m_CellToInputWeights && !m_CellToForgetWeights
4029  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
4030  if (!peephole_weights_all_or_none)
4031  {
4032  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
4033  }
4034 
4035  // Make sure the input gate bias is present only when not a CIFG-LSTM.
4036  if (m_Parameters.m_CifgEnabled)
4037  {
4038  if (m_InputGateBias)
4039  {
4040  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
4041  }
4042  }
4043  else
4044  {
4045  if (!m_InputGateBias)
4046  {
4047  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
4048  "must be present.");
4049  }
4050  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
4051  n_cell, "InputGateBias");
4052  }
4053 
4054  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
4055  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
4056 
4057  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
4058  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
4059 
4060  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
4061  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
4062 
4063  if (m_ProjectionWeights)
4064  {
4065  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
4066  (n_cell * n_output), "ProjectionWeights");
4067  }
4068  if (m_ProjectionBias)
4069  {
4070  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
4071  }
4072 
4073  // Making sure the projection tensors are consistent:
4074  // 1) If projection weight is not present, then projection bias should not be
4075  // present.
4076  // 2) If projection weight is present, then projection bias is optional.
4077  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
4078  !m_Parameters.m_ProjectionEnabled)
4079  || (m_ProjectionWeights && !m_ProjectionBias &&
4080  m_Parameters.m_ProjectionEnabled)
4081  || (m_ProjectionWeights && m_ProjectionBias &&
4082  m_Parameters.m_ProjectionEnabled));
4083  if (!projecton_tensors_consistent)
4084  {
4085  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
4086  }
4087 
4088  // The four layer normalization weights either all have values or none of them have values. Additionally, if
4089  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
4090  // either all have values or none of them have values. Layer normalization is used when the values of all the
4091  // layer normalization weights are present
4092  if (m_InputLayerNormWeights)
4093  {
4094  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
4095  }
4096  if (m_ForgetLayerNormWeights)
4097  {
4098  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4099  }
4100  if (m_CellLayerNormWeights)
4101  {
4102  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4103  }
4104  if (m_OutputLayerNormWeights)
4105  {
4106  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4107  }
4108 
4109  if (m_Parameters.m_LayerNormEnabled)
4110  {
4111  if (!m_Parameters.m_CifgEnabled)
4112  {
4113  if (!m_InputLayerNormWeights)
4114  {
4115  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
4116  "disabled but InputLayerNormWeights are not present");
4117  }
4118  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
4119  1, n_cell, "InputLayerNormWeights");
4120  }
4121  else if (m_InputLayerNormWeights)
4122  {
4123  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
4124  "enabled");
4125  }
4126 
4127  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
4128  "ForgetLayerNormWeights");
4129  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
4130 
4131  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
4132  "OutputLayerNormWeights");
4133  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
4134 
4135  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
4136  "CellLayerNormWeights");
4137  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
4138  }
4139  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
4140  {
4141  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
4142  "normalisation weights are present.");
4143  }
4144 }
4145 
4146 
4147 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:432
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:280
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void ValidateTensorNumDimNumElem(const TensorInfo &tensorInfo, unsigned int numDimension, unsigned int numElements, std::string const &tensorName) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2021 ARM Limited and Contributors.
void Validate(const WorkloadInfo &workloadInfo) const
SizeType GetSize() const
Definition: Types.hpp:338
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:201
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:202
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:48
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
float GetQuantizationScale() const
Definition: Tensor.cpp:461
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
Definition: Tensor.hpp:198
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void ValidateTensorNumDimensions(const TensorInfo &tensor, std::string const &descName, unsigned int numDimensions, std::string const &tensorName) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about TensorInfos of a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
unsigned int GetChannelsIndex() const
bool IsQuantized() const
Definition: Tensor.cpp:504
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin