// ArmNN 20.11 — WorkloadData.cpp (doxygen source listing; see the online
// documentation for this file).
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
11 
12 #include <algorithm>
13 #include <iomanip>
14 #include <string>
15 #include <sstream>
16 
17 #include <fmt/format.h>
18 
19 using namespace armnnUtils;
20 
21 namespace armnn
22 {
23 
//---------------------------------------------------------------
// NOTE(review): the declaration line of this function is elided in this view of
// the file; from the parameter name and the switch below it presumably maps a
// layer's input DataType to the DataType required for its bias tensor
// (GetBiasDataType) — confirm against the header.
{
    // Quantized 8/16-bit inputs accumulate into 32-bit integers, so the bias
    // must be Signed32; float inputs keep a float bias (BFloat16 widens to
    // Float32).
    switch (inputDataType)
    {
        case DataType::Float16:
            return DataType::Float16;
        case DataType::BFloat16:
        case DataType::Float32:
            return DataType::Float32;
        case DataType::QAsymmS8:
            return DataType::Signed32;
        case DataType::QAsymmU8:
            return DataType::Signed32;
        case DataType::QSymmS8:
            return DataType::Signed32;
        case DataType::QSymmS16:
            return DataType::Signed32;
        default:
            ARMNN_ASSERT_MSG(false, "Invalid input data type");
            // Unreachable in release builds; Float32 is a defensive fallback.
            return DataType::Float32;
    }
}
47 
48 namespace
49 {
50 
//---------------------------------------------------------------
// android ndk does not support std::to_string function.
// Local stand-in: stream-insert the value and return the accumulated text.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
60 
61 //---------------------------------------------------------------
62 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
63 {
64  if (!ptr)
65  {
66  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
67  paramName + " parameter must be set.");
68  }
69 }
70 
71 //---------------------------------------------------------------
72 void ValidateTensorShapesMatch(const TensorInfo& first,
73  const TensorInfo& second,
74  std::string const& descName,
75  std::string const& firstName,
76  std::string const& secondName)
77 {
78  if (first.GetShape() != second.GetShape())
79  {
80  throw InvalidArgumentException(descName + ": "
81  + firstName + " & " + secondName + " must have identical shapes");
82  }
83 }
84 
85 //---------------------------------------------------------------
86 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
87 {
88  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
89  {
90  throw InvalidArgumentException(descName +
91  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
92  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
93  }
94 }
95 
96 //---------------------------------------------------------------
97 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
98 {
99  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
100  {
101  throw InvalidArgumentException(descName +
102  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
103  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
104  }
105 }
106 
107 //---------------------------------------------------------------
108 void ValidateTensorNumDimensions(const TensorInfo& tensor,
109  std::string const& descName,
110  unsigned int numDimensions,
111  std::string const& tensorName)
112 {
113  if (tensor.GetNumDimensions() != numDimensions)
114  {
115  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
116  to_string(tensor.GetNumDimensions()) + " dimensions for " +
117  tensorName + " tensor.");
118  }
119 }
120 
121 //---------------------------------------------------------------
122 void ValidateTensorNumElements(const TensorInfo& tensor,
123  std::string const& descName,
124  unsigned int numElements,
125  std::string const& tensorName)
126 {
127  if (tensor.GetNumElements() != numElements)
128  {
129  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
130  to_string(tensor.GetNumElements()) + " elements for " +
131  tensorName + " tensor.");
132  }
133 }
134 
//---------------------------------------------------------------
// Convenience check: validates the dimensionality first and then the total
// element count, so a rank mismatch is reported before an element-count
// mismatch.
void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
                                 unsigned int numDimension,
                                 unsigned int numElements,
                                 std::string const& tensorName)
{
    const std::string functionName{"ValidateTensorNumDimNumElem"};
    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
}
145 
146 //---------------------------------------------------------------
147 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
148  const std::string& descName, std::string const& tensorName)
149 {
150  if (tensor.GetDataType() != dataType)
151  {
152  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
153  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
154  }
155 }
156 
// Checks that 'tensor' uses a data type capable of carrying per-axis
// quantization parameters: QSymmS8 or the deprecated QuantizedSymm8PerAxis.
// NOTE(review): deprecation-warning suppression macro lines around this check
// are elided in this view of the file.
void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
{
    if (tensor.GetDataType() != DataType::QSymmS8 &&
        tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
    {
        throw InvalidArgumentException(descName +
            ": Expected data type which supports per-axis quantization scheme but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
169 
170 //---------------------------------------------------------------
171 void ValidateTensorQuantizationSpace(const TensorInfo& first,
172  const TensorInfo& second,
173  const std::string& descName,
174  std::string const& firstName,
175  std::string const& secondName)
176 {
177  if (!first.IsQuantized() ||
178  !second.IsQuantized())
179  {
180  // Not a quantized type, ignore the validation
181  return;
182  }
183 
184  DataType firstDataType = first.GetDataType();
185  DataType secondDataType = second.GetDataType();
186 
187  if (firstDataType != secondDataType)
188  {
189  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
190  " must be of the same quantized type, " +
191  firstName + " is " + GetDataTypeName(firstDataType) + ", " +
192  secondName + " is " + GetDataTypeName(secondDataType));
193  }
194 
195  if (!first.IsTypeSpaceMatch(second))
196  {
197  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
198  " must have the same quantization space, " +
199  firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
200  " and scale " + to_string(first.GetQuantizationScale()) + ", " +
201  secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
202  " and scale " + to_string(second.GetQuantizationScale()));
203  }
204 }
205 
//---------------------------------------------------------------
// Checks a bias tensor's quantization against the convention
// bias_scale == input_scale * weight_scale (per tensor, or per channel when
// the bias or the weights carry multiple scales) and bias offset == 0.
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    // Helper lambda function to validate a single bias quantization scale value
    auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
    {
        // Scales are compared with a small absolute tolerance rather than
        // exact equality, since they come from a float multiplication.
        constexpr float tolerance = 0.000001f;
        if (std::abs(biasScale - expectedScale) > tolerance)
        {
            // Print the float values with extra precision to see very small differences
            std::stringstream msg;
            msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
                " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
                biasScale;
            throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
        }
    };

    // Bias tensors are required to be symmetric (zero offset).
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }

    if (biasTensor.HasMultipleQuantizationScales() || weightsTensorInfo.HasMultipleQuantizationScales())
    {
        // Validate per-axis quantization scales
        const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
        const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();

        // Weights and bias must provide one scale per channel, in equal numbers.
        if (weightScales.size() != biasScales.size())
        {
            std::stringstream msg;
            msg << descName << ": Expected matching number of per-axis quantization scales for weights and bias, "
                << "but got different values. This is currently unsupported: weights=" << weightScales.size()
                << ", biases=" << biasScales.size();
            throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
        }

        // Each channel's bias scale must equal input_scale * weight_scale[i].
        for (size_t i = 0ul; i < biasScales.size(); ++i)
        {
            const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
            VerifyBiasQuantizationScale(biasScales[i], expectedScale);
        }
    }
    else
    {
        // Validate per-tensor quantization scale
        const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
        VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
    }
}
261 
262 //---------------------------------------------------------------
263 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
264  unsigned int numExpected,
265  const std::string& descName,
266  const std::string& varName)
267 {
268  if (vec.empty() && numExpected > 0)
269  {
270  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
271  }
272 
273  for (unsigned int i = 0; i < numExpected; ++i)
274  {
275  if (!vec[i])
276  {
277  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
278  }
279  }
280 }
281 
//---------------------------------------------------------------
// Checks that 'first' and 'second' are broadcast-compatible (equal rank; each
// dimension pair is either equal or has a 1 on one side) and that 'output'
// has exactly the broadcasted shape (element-wise max of the input shapes).
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // Dimensions may differ only if one of them is 1 (the broadcast dim).
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            // NOTE(review): unlike the other errors here, this message omits
            // descName — worth making consistent.
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
            + firstName + " & " + secondName
            + " does not match the output shape");
    }
}
318 
319 //---------------------------------------------------------------
320 void ValidateDataTypes(const TensorInfo& info,
321  const std::vector<armnn::DataType>& supportedTypes,
322  std::string const& descName)
323 {
324  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
325  if (iterator == supportedTypes.end())
326  {
327  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
328  }
329 }
330 
331 //---------------------------------------------------------------
332 void ValidateTensorDataTypesMatch(const TensorInfo& first,
333  const TensorInfo& second,
334  std::string const& descName,
335  std::string const& firstName,
336  std::string const& secondName)
337 {
338  if (first.GetDataType() != second.GetDataType())
339  {
340  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
341  " must have identical data types.");
342  }
343 }
344 
345 //---------------------------------------------------------------
346 void ValidateTensorNumElementsMatch(const TensorInfo& first,
347  const TensorInfo& second,
348  std::string const& descName,
349  std::string const& firstName,
350  std::string const& secondName)
351 {
352  if (first.GetNumElements() != second.GetNumElements())
353  {
354  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
355  " must have the same number of elements.");
356  }
357 }
358 
// Checks that the weight tensor's data type is valid for the given input type:
// any 8-bit quantized weight type is accepted for 8-bit quantized inputs,
// otherwise the weight type must match the input type exactly.
// NOTE(review): deprecation-warning suppression macro lines around the
// QuantizedSymm8PerAxis entry are elided in this view of the file.
void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        const std::vector<DataType> validTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}
383 
384 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
385  const std::string& descName,
386  const std::string& tensorName)
387 {
388  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
389  if (!quantizationDim.has_value())
390  {
391  throw InvalidArgumentException(fmt::format("{0}: Quantization dimension for per-axis quantization "
392  "not set on tensor {1}.", descName, tensorName));
393  }
394 
395  if (quantizationDim.value() != 0)
396  {
397  throw InvalidArgumentException(fmt::format(
398  "{0}: Quantization dimension for per-axis quantization expected to be 0 on tensor {1}, "
399  "but got: {2}", descName, tensorName, quantizationDim.value()));
400  }
401 }
402 
403 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
404  const std::string& descName,
405  const std::string& tensorName)
406 {
407  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
408  if (quantizationOffset != 0)
409  {
410  throw InvalidArgumentException(fmt::format(
411  "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
412  descName, tensorName, quantizationOffset));
413  }
414 }
415 
// When the weight tensor carries per-axis quantization, checks the full
// per-axis contract: input/output are matching 8-bit quantized types; the
// weight's data type, quantization dimension and offset are valid; and any
// bias is itself per-axis quantized Signed32 with valid dimension and offset.
// No-op when the weights are not per-axis quantized.
void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        // Per-axis parameters only make sense for 8-bit quantized data where
        // the input and output types agree.
        const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support "
                "per-axis quantization.", descName, "weight"));
        }

        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(fmt::format(
                    "{}: Per-axis quantization parameters not set on bias tensor, "
                    "despite being set on weight tensor.", descName));
            }

            // A bias accompanying per-axis quantized weights must be
            // per-axis quantized Signed32.
            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
457 
458 } // anonymous namespace
459 
// Checks that this descriptor carries the expected number of non-null input
// and output tensor handles. Inputs are validated before outputs, so an input
// problem is reported first.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
466 
//---------------------------------------------------------------
// A Map workload consumes exactly one input and produces no outputs; every
// provided input handle must be non-null.
void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MapQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 0);

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            // NOTE(review): the line throwing InvalidArgumentException is
            // elided in this view of the file; only its fmt::format argument
            // is visible below.
            fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
        }
    }
}
484 
//---------------------------------------------------------------
// An Unmap workload consumes exactly one input and produces no outputs; every
// provided input handle must be non-null.
void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"UnmapQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 0);

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            // NOTE(review): the line throwing InvalidArgumentException is
            // elided in this view of the file; only its fmt::format argument
            // is visible below.
            fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
        }
    }
}
502 
503 //---------------------------------------------------------------
504 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
505 {
506  const std::string descriptorName{"MemCopyQueueDescriptor"};
507 
508  ValidateNumInputs(workloadInfo, descriptorName, 1);
509  ValidateNumOutputs(workloadInfo, descriptorName , 1);
510 
511  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
512  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
513 
514  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
515  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
516 
517  if (m_Inputs.size() != m_Outputs.size())
518  {
519  throw InvalidArgumentException(fmt::format(
520  "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
521  descriptorName, m_Inputs.size(), m_Outputs.size()));
522  }
523 
524  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
525  {
526  if (!m_Inputs[i])
527  {
528  throw InvalidArgumentException(fmt::format(
529  "{0}: Invalid NULL input {1}.", descriptorName, i));
530  }
531 
532  if (!m_Outputs[i])
533  {
534  throw InvalidArgumentException(fmt::format("{0}: Invalid NULL output {1}", descriptorName, i));
535  }
536  }
537 }
538 
//---------------------------------------------------------------
// Validates a MemImport workload: exactly one input imported to one output,
// with paired tensor infos of equal element counts and non-null handles.
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);

    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of input infos ({}) is not 1.",
            workloadInfo.m_InputTensorInfos.size()));

    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of input infos ({0}) does not match the number of output infos ({1})",
            workloadInfo.m_InputTensorInfos.size(), workloadInfo.m_OutputTensorInfos.size()));
    }

    // Import does not reshape: each output must hold exactly as many elements
    // as its corresponding input.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(fmt::format(
                "Number of elements for tensor input and output {} does not match", i ));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of inputs ({0}) does not match the number of outputs ({1})",
            m_Inputs.size(), m_Outputs.size()));
    }

    // All provided handles must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null input {}", i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null output {}", i));
        }
    }
}
594 
//---------------------------------------------------------------
// Validates a MemSync workload: one input handle, no output handles.
// NOTE(review): workloadInfo is required to carry exactly 1 output tensor
// info, yet m_Outputs below must be empty — a sync produces no new output
// handle, but the asymmetry is worth confirming.
void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Outputs.size() != 0)
    {
        throw InvalidArgumentException(fmt::format("Number of outputs ({}) is not 0.", m_Outputs.size()));
    }

    if (!m_Inputs[0])
    {
        throw InvalidArgumentException(fmt::format("Invalid null input 0"));
    }
}
616 
//---------------------------------------------------------------
// Validates an Activation workload: unary, element-wise — one input and one
// output with matching data type and shape.
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the enumerators of this supported-type list are elided in
    // this view of the file.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
642 
// Validates an ArgMinMax workload: one input, one integral-typed output whose
// shape is the input shape with the reduced axis removed.
void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // ArgMin/ArgMax produces indices, so the output must be Signed32/Signed64.
    if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
        outputTensorInfo.GetDataType() != DataType::Signed64)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
    }

    // NOTE(review): the enumerators of this supported-type list are elided in
    // this view of the file.
    std::vector<DataType> supportedInputTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    // Normalise a possibly negative axis parameter into [0, inputNumDimensions).
    auto inputNumDimensions = inputShape.GetNumDimensions();
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        // Dimensions before the reduced axis must be unchanged...
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        // ...and dimensions after it shift down by one (the axis is removed).
        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
708 
// Validates a Softmax workload: unary, element-count-preserving — one input
// and one output with matching data type and shape.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the enumerators of this supported-type list are elided in
    // this view of the file.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
733 
// Validates a Splitter workload: one input split into N outputs, one view
// origin per output, each window lying entirely inside the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    // NOTE(review): the enumerators of this list are elided in this view of
    // the file.
    std::vector<DataType> supportedTypes =
    {
    };

    // Every output must be of a supported type and match the input's type.
    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    // One split window (view origin) is required per output.
    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the line throwing InvalidArgumentException is elided
        // in this view of the file; only the message expression is visible
        // below.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_OutputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                "have the same dimensionality as the input tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the input "
                "tensor has " +
                to_string(inputDims) + " dimensions.");
        }
        // Each window, offset by its origin, must fit within the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
805 
// Validates a Concat workload: N inputs merged into one output, one view
// origin per input (unless concatenating over the innermost dimension), each
// merge window lying entirely inside the output tensor.
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    // Concatenation over the innermost dimension skips the view-origin checks
    // below entirely.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    // One merge window (view origin) is required per input.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the line throwing InvalidArgumentException is elided
        // in this view of the file; only the message expression is visible
        // below.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_InputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                "have the same dimensionality as the output tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the output "
                "tensor has " +
                to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    // NOTE(review): the enumerators of this list are elided in this view of
    // the file.
    std::vector<DataType> supportedTypes =
    {
    };

    // Every input must be of a supported type and match the output's type.
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
901 
// Validates a Stack workload: the declared number of inputs, identical input
// shapes (rank <= 4), stack axis in [0, input rank], the output shape inferred
// by inserting the stack axis, and matching data types across inputs/output.
902 void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
903 {
904  const std::string descriptorName{"StackQueueDescriptor"};
905 
906  ValidateNumOutputs(workloadInfo, descriptorName, 1);
907 
908  if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
909  {
910  throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
911  }
912 
913  // All inputs must have the same shape, which is defined in parameters
914  const TensorShape& inputShape = m_Parameters.m_InputShape;
915  for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
916  {
917  if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
918  {
919  throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
920  }
921  }
922 
923  if (inputShape.GetNumDimensions() > 4)
924  {
925  throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
926  }
927 
928  // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
929  // since the output tensor has an additional dimension.
930  if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
931  {
932  throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
933  "than the number of input dimensions.")
934  }
935 
936  // Output shape must be as inferred from the input shape
937  const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
938  for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
939  {
940  if (outputShape[i] != inputShape[i])
941  {
942  throw InvalidArgumentException(descriptorName + ": Output tensor must "
943  "match shape inferred from input tensor.");
944  }
945  }
946 
947  if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
948  {
949  throw InvalidArgumentException(descriptorName + ": Output tensor must "
950  "match shape inferred from input tensor.");
951  }
952 
953  // Dimensions after the stack axis are shifted up by one relative to the input.
954  for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
955  {
956  if (outputShape[i] != inputShape[i-1])
957  {
958  throw InvalidArgumentException(descriptorName + ": Output tensor must "
959  "match shape inferred from input tensor.");
960  }
961  }
962 
963  if (outputShape.GetNumDimensions() > 5)
964  {
965  throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
966  }
967 
968  // Check the supported data types
969  // (list contents elided in this rendering)
970  std::vector<DataType> supportedTypes =
971  {
972  };
973 
974  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
975 
976  // All further inputs, and the output, must share input_0's data type.
977  for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
978  {
979  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
980  workloadInfo.m_InputTensorInfos[i],
981  descriptorName,
982  "input_0",
983  "input_" + std::to_string(i));
984  }
985 
986  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
987  workloadInfo.m_OutputTensorInfos[0],
988  descriptorName,
989  "input_0",
990  "output");
991 }
997 
// Validates a Fill workload: exactly one 1-D input (the shape tensor) and one
// output whose data type must be in the supported set.
998 void FillQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
999 {
1000  const std::string descriptorName{"FillQueueDescriptor"};
1001 
1002  ValidateNumInputs(workloadInfo, descriptorName, 1);
1003  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1004 
1005  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1006  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1007 
1008  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1, "input");
1009 
1010  // Supported output types (list contents elided in this rendering).
1011  std::vector<DataType> supportedTypes =
1012  {
1016  };
1017 
1018  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1019 }
1020 
// FullyConnectedQueueDescriptor::Validate (signature elided in this rendering).
// Checks: input rank 2 or 4, output rank 2, 2-D weights, optional 1-D bias with
// matching bias type/quantization, supported input type, and the special case
// allowing BFloat16 input with Float32 output.
1022 {
1023  const std::string descriptorName{"FullyConnectedQueueDescriptor"};
1024 
1025  ValidateNumInputs(workloadInfo, descriptorName, 1);
1026  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1027 
1028  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1029  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1030 
1031  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1032 
1033  if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
1034  {
1035  throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
1036  }
1037 
1038  ValidatePointer(m_Weight, descriptorName, "weight");
1039 
1040  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1041  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
1042 
1043  if (m_Parameters.m_BiasEnabled)
1044  {
1045  ValidatePointer(m_Bias, descriptorName, "bias");
1046 
1047  // Validates type and quantization values.
1048  const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
1049  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1050 
1051  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1052  ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1053  }
1054 
1055  // Check the supported data types (list contents elided in this rendering)
1056  std::vector<DataType> supportedTypes =
1057  {
1064  };
1065 
1066  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1067 
1068  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1069  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1070  {
1071  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1072  {
1073  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1074  "for BFloat16 input.");
1075  }
1076  }
1077  else
1078  {
1079  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1080  }
1081 }
1082 
// NormalizationQueueDescriptor::Validate (signature elided in this rendering).
// Checks one input and one output with matching data types and shapes.
1084 {
1085  const std::string descriptorName{"NormalizationQueueDescriptor"};
1086 
1087  ValidateNumInputs(workloadInfo, descriptorName, 1);
1088  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1089 
1090  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1091  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1092 
1093  // Check the supported data types (list contents elided in this rendering)
1094  std::vector<DataType> supportedTypes =
1095  {
1102  };
1103 
1104  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1105 
1106  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1107 
1108  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1109 }
1110 
// Validates an Addition workload: two inputs and one output of matching data
// types, with input shapes that must be broadcast-compatible with the output.
1111 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1112 {
1113  const std::string descriptorName{"AdditionQueueDescriptor"};
1114 
1115  ValidateNumInputs(workloadInfo, descriptorName, 2);
1116  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1117 
1118  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1119  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1120  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1121 
1122  // Supported data types (list contents elided in this rendering).
1123  std::vector<DataType> supportedTypes =
1124  {
1131  };
1132 
1133  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1134  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1135  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1136 
1137  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1138  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1139 
1140  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1141  inputTensorInfo1,
1142  outputTensorInfo,
1143  descriptorName,
1144  "input_0",
1145  "input_1");
1146 }
1147 
// MultiplicationQueueDescriptor::Validate (signature elided in this rendering).
// Mirrors Addition: two broadcast-compatible inputs with matching data types.
1149 {
1150  const std::string descriptorName{"MultiplicationQueueDescriptor"};
1151 
1152  ValidateNumInputs(workloadInfo, descriptorName, 2);
1153  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1154 
1155  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1156  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1157  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1158 
1159  // Supported data types (list contents elided in this rendering).
1160  std::vector<DataType> supportedTypes =
1161  {
1168  };
1169 
1170  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1171  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1172  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1173 
1174  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1175  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1176 
1177  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1178  inputTensorInfo1,
1179  outputTensorInfo,
1180  descriptorName,
1181  "input_0",
1182  "input_1");
1183 }
1184 
// BatchNormalizationQueueDescriptor::Validate (signature elided in this
// rendering). Checks input/output type+shape agreement and that mean, variance,
// beta and gamma are all present, 1-D, and identically shaped to each other.
1186 {
1187  const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
1188 
1189  ValidateNumInputs(workloadInfo, descriptorName, 1);
1190  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1191 
1192  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1193  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1194 
1195  // Supported data types (list contents elided in this rendering).
1196  std::vector<DataType> supportedTypes =
1197  {
1203  };
1204 
1205  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1206  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1207 
1208  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1209  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1210 
1211  ValidatePointer(m_Mean, descriptorName, "mean");
1212  ValidatePointer(m_Variance, descriptorName, "variance");
1213  ValidatePointer(m_Beta, descriptorName, "beta");
1214  ValidatePointer(m_Gamma, descriptorName, "gamma");
1215 
1216  const TensorInfo& mean = m_Mean->GetTensorInfo();
1217  const TensorInfo& variance = m_Variance->GetTensorInfo();
1218  const TensorInfo& beta = m_Beta->GetTensorInfo();
1219  const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1220 
1221  ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
1222  ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
1223  ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
1224  ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
1225 
1226  ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
1227  ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
1228  ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
1229 }
1230 
// Convolution2dQueueDescriptor::Validate (signature elided in this rendering).
// Checks 4-D input/output/weights, weight/bias data types and quantization
// (incl. per-axis), and the BFloat16-in/Float32-out special case.
1232 {
1233  const std::string descriptorName{"Convolution2dQueueDescriptor"};
1234 
1235  ValidateNumInputs(workloadInfo, descriptorName, 1);
1236  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1237 
1238  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1239  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1240 
1241  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1242  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1243 
1244  ValidatePointer(m_Weight, descriptorName, "weight");
1245 
1246  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1247  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1248 
1249  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1250 
1251  Optional<TensorInfo> optionalBiasTensorInfo;
1252  if (m_Parameters.m_BiasEnabled)
1253  {
1254  ValidatePointer(m_Bias, descriptorName, "bias");
1255 
1256  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1257  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1258 
1259  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1260  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1261  }
1262 
1263  ValidatePerAxisQuantization(inputTensorInfo,
1264  outputTensorInfo,
1265  weightTensorInfo,
1266  optionalBiasTensorInfo,
1267  descriptorName);
1268 
1269  // Supported data types (list contents elided in this rendering).
1270  std::vector<DataType> supportedTypes =
1271  {
1278  };
1279 
1280  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1281 
1282  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1283  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1284  {
1285  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1286  {
1287  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1288  "for BFloat16 input.");
1289  }
1290  }
1291  else
1292  {
1293  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1294  }
1295 }
1296 
// DepthwiseConvolution2dQueueDescriptor::Validate (signature elided in this
// rendering). Checks 4-D tensors, dilation >= 1, the M * I == output-channels
// relation for [M, I, H, W] weights, bias/quantization rules and data types.
1298 {
1299  const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1300 
1301  ValidateNumInputs(workloadInfo, descriptorName, 1);
1302  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1303 
1304  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1305  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1306 
1307  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1308  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1309 
1310  ValidatePointer(m_Weight, descriptorName, "weight");
1311 
1312  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1313  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1314 
1315  if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1316  {
1317  // NOTE(review): m_DilationX is passed for both format placeholders below;
1318  // the second argument should presumably be m_DilationY — confirm upstream.
1318  fmt::format("{}: dilationX (provided {}) and dilationY (provided {}) "
1319  "cannot be smaller than 1.",
1320  descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationX));
1321  }
1322 
1323  const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1324 
1325  // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1326  // inputChannels * channelMultiplier should be equal to outputChannels.
1327  const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1328  const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
1329  const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1330  if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1331  {
1332  throw InvalidArgumentException(fmt::format(
1333  "{0}: output_channels (provided {1}) should be equal to input_channels (provided {2}) "
1334  "multiplied by channel_multiplier (provided {3}).",
1335  descriptorName, numWeightOutputChannels, numWeightInputChannels, numWeightChannelMultiplier));
1336  }
1337 
1338  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1339 
1340  Optional<TensorInfo> optionalBiasTensorInfo;
1341  if (m_Parameters.m_BiasEnabled)
1342  {
1343  ValidatePointer(m_Bias, descriptorName, "bias");
1344 
1345  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1346  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1347 
1348  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1349  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1350  }
1351  ValidatePerAxisQuantization(inputTensorInfo,
1352  outputTensorInfo,
1353  weightTensorInfo,
1354  optionalBiasTensorInfo,
1355  descriptorName);
1356 
1357  // Supported data types (list contents elided in this rendering).
1358  std::vector<DataType> supportedTypes =
1359  {
1365  };
1366 
1367  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1368  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1369 }
1370 
1371 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1372 {
1373  const std::string descriptorName{"PermuteQueueDescriptor"};
1374 
1375  ValidateNumInputs(workloadInfo, descriptorName, 1);
1376  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1377 
1378  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1379 
1380  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1381  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1382 
1383  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1384  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1385 
1386  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1387  {
1388  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1389  {
1390  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1391  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1392  "must match dst dimension " + to_string(mapping[i]) +
1393  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1394  }
1395  }
1396 
1397  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1398 }
1399 
// Validates a Pooling2d workload: one 4-D input and one 4-D output with
// matching, supported data types.
1400 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1401 {
1402  const std::string descriptorName{"Pooling2dQueueDescriptor"};
1403 
1404  ValidateNumInputs(workloadInfo, descriptorName, 1);
1405  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1406 
1407  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1408  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1409 
1410  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1411  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1412 
1413  // Supported data types (list contents elided in this rendering).
1414  std::vector<DataType> supportedTypes =
1415  {
1421  };
1422 
1423  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1424  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1425 }
1426 
// ResizeBilinearQueueDescriptor::Validate (signature elided in this rendering).
// 4-D input/output with matching data types; only width/height may change, so
// batch and channel counts must agree between input and output.
1428 {
1429  const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1430 
1431  ValidateNumInputs(workloadInfo, descriptorName, 1);
1432  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1433 
1434  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1435  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1436 
1437  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1438  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1439 
1440  // Supported data types (list contents elided in this rendering).
1441  std::vector<DataType> supportedTypes =
1442  {
1448  };
1449 
1450  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1451  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1452 
1453  // ResizeBilinear only changes width and height: batch and channel count must match.
1454  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1455  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1456  if (inputBatchSize != outputBatchSize)
1457  {
1458  // (throw statement's opening line elided in this rendering)
1459  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1460  descriptorName, inputBatchSize, outputBatchSize));
1461  }
1462 
1463  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1464  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1465  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1466  if (inputChannelCount != outputChannelCount)
1467  {
1468  // (throw statement's opening line elided in this rendering)
1469  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1470  descriptorName, inputChannelCount, outputChannelCount));
1471  }
1472 }
1473 
// Validates a Resize workload: 4-D input/output with matching data types; only
// the spatial dimensions may differ, so batch and channel counts must agree.
1474 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1475 {
1476  const std::string descriptorName{"ResizeQueueDescriptor"};
1477 
1478  ValidateNumInputs(workloadInfo, descriptorName, 1);
1479  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1480 
1481  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1482  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1483 
1484  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1485  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1486 
1487  // Supported data types (list contents elided in this rendering).
1488  std::vector<DataType> supportedTypes =
1489  {
1495  };
1496 
1497  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1498  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1499 
1500  // Resize only changes width and height: batch and channel count must match.
1501  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1502  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1503  if (inputBatchSize != outputBatchSize)
1504  {
1505  // (throw statement's opening line elided in this rendering)
1506  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1507  descriptorName, inputBatchSize, outputBatchSize));
1508  }
1509 
1510  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1511  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1512  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1513  if (inputChannelCount != outputChannelCount)
1514  {
1515  // (throw statement's opening line elided in this rendering)
1516  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1517  descriptorName, inputChannelCount, outputChannelCount));
1518  }
1519 }
1520 
// FakeQuantizationQueueDescriptor::Validate (signature elided in this
// rendering). Checks 2-D input/output with equal shapes and min <= max.
1522 {
1523  const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1524 
1525  ValidateNumInputs(workloadInfo, descriptorName, 1);
1526  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1527 
1528  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1529  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1530 
1531  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1532  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1533 
1534  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1535 
1536  if (m_Parameters.m_Min > m_Parameters.m_Max)
1537  {
1538  throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1539  }
1540 }
1541 
// InstanceNormalizationQueueDescriptor::Validate (signature elided in this
// rendering). Rank <= 4, identical input/output shapes and data types.
1543 {
1544  const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};
1545 
1546  ValidateNumInputs(workloadInfo, descriptorName, 1);
1547  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1548 
1549  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1550  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1551 
1552  if (inputTensorInfo.GetNumDimensions() > 4)
1553  {
1554  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1555  }
1556 
1557  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1558 
1559  // Check the supported data types (list contents elided in this rendering)
1560  std::vector<DataType> supportedTypes =
1561  {
1565  };
1566 
1567  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1568  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1569 }
1570 
// L2NormalizationQueueDescriptor::Validate (signature elided in this
// rendering). Rank <= 4, identical input/output shapes and data types.
1572 {
1573  const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1574 
1575  ValidateNumInputs(workloadInfo, descriptorName, 1);
1576  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1577 
1578  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1579  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1580 
1581  if (inputTensorInfo.GetNumDimensions() > 4)
1582  {
1583  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1584  }
1585 
1586  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1587 
1588  // Check the supported data types (list contents elided in this rendering)
1589  std::vector<DataType> supportedTypes =
1590  {
1597  };
1598 
1599  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1600  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1601 }
1602 
// Validates a LogSoftmax workload: one input and one output with identical
// shapes and matching, supported data types.
1603 void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1604 {
1605  const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1606 
1607  ValidateNumInputs(workloadInfo, descriptorName, 1);
1608  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1609 
1610  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1611  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1612 
1613  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1614 
1615  // Supported data types (list contents elided in this rendering).
1616  std::vector<DataType> supportedTypes =
1617  {
1620  };
1621 
1622  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1623  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1624 }
1625 
// Validates a Constant workload: no inputs, one output that must match the
// shape of the stored constant tensor and use a supported data type.
1626 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1627 {
1628  const std::string descriptorName{"ConstantQueueDescriptor"};
1629 
1630  ValidateNumInputs(workloadInfo, descriptorName, 0);
1631  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1632 
1633  if (!m_LayerOutput)
1634  {
1635  throw InvalidArgumentException(descriptorName + ": No const input specified.");
1636  }
1637 
1638  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1639  ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1640 
1641  // Check the supported data types (list contents elided in this rendering)
1642  std::vector<DataType> supportedTypes =
1643  {
1652  };
1653 
1654  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1655 }
1656 
// Validates a Reshape workload: input and output must hold the same number of
// elements and share a supported data type.
1657 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1658 {
1659  const std::string descriptorName{"ReshapeQueueDescriptor"};
1660 
1661  ValidateNumInputs(workloadInfo, descriptorName, 1);
1662  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1663 
1664  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1665  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1666 
1667  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1668 
1669  // Check the supported data types (list contents elided in this rendering)
1670  std::vector<DataType> supportedTypes =
1671  {
1680  };
1681 
1682  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1683  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1684 }
1685 
// SpaceToBatchNdQueueDescriptor::Validate (signature elided in this rendering).
// 4-D tensors, exactly 2 spatial block dimensions with a matching pad list,
// element-count conservation after padding, and padded spatial dims divisible
// by the block shape.
1687 {
1688  const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1689 
1690  ValidateNumInputs(workloadInfo, descriptorName, 1);
1691  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1692 
1693  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1694  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1695 
1696  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1697  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1698 
1699  if (m_Parameters.m_BlockShape.size() != 2)
1700  {
1701  throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1702  }
1703 
1704  if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1705  {
1706  throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1707  "dimensions as Block Shape.");
1708  }
1709 
1710  const TensorShape& inputShape = inputTensorInfo.GetShape();
1711 
1712  std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1713  std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1714 
1715  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1716 
1717  // Spatial extents after applying the (before, after) padding pairs.
1717  const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1718  widthPad.first + widthPad.second;
1719  const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1720  heightPad.first + heightPad.second;
1721 
1722  const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1723  inputShape[dimensionIndices.GetChannelsIndex()];
1724  const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1725 
1726  if (numOutputElements != numInputElements)
1727  {
1728  throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1729  to_string(numInputElements) + " after padding but output tensor has " +
1730  to_string(numOutputElements) + " elements.");
1731  }
1732 
1733  if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1734  {
1735  throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1736  "divisible by Block Shape in all spatial dimensions");
1737  }
1738 
1739  // Supported data types (list contents elided in this rendering).
1739  std::vector<DataType> supportedTypes =
1740  {
1747  };
1748 
1749  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1750  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1751 }
1752 
// SpaceToDepthQueueDescriptor::Validate (signature elided in this rendering).
// 4-D tensors, equal element counts, non-zero block size, spatial dims
// divisible by the block size, output depth divisible by its square.
1754 {
1755  const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1756 
1757  ValidateNumInputs(workloadInfo, descriptorName, 1);
1758  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1759 
1760  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1761  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1762 
1763  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1764  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1765 
1766  // Supported data types (list contents elided in this rendering).
1766  std::vector<DataType> supportedTypes =
1767  {
1774  };
1775 
1776  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1777  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1778 
1779  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1780 
1781  if (m_Parameters.m_BlockSize == 0)
1782  {
1783  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
1784  }
1785 
1786  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1787  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1788  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1789  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1790 
1791  const TensorShape& inputShape = inputTensorInfo.GetShape();
1792  if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1793  {
1794  throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1795  "by block size in all spatial dimensions");
1796  }
1797 
1798  const TensorShape& outputShape = outputTensorInfo.GetShape();
1799  if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1800  {
1801  // NOTE(review): the adjacent string literals below concatenate to
1801  // "tensormust" — a separating space is missing; fix upstream.
1801  throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
1802  "must be divisible by the square of block size." );
1803  }
1804 }
1805 
// Validates a Floor workload: one input of a supported type; input and output
// TensorInfos must be fully identical (shape, type, quantization).
1806 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1807 {
1808  const std::string descriptorName{"FloorQueueDescriptor"};
1809 
1810  ValidateNumInputs(workloadInfo, descriptorName, 1);
1811  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1812 
1813  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1814  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1815 
1816  // Supported data types (list contents elided in this rendering).
1816  std::vector<DataType> supportedTypes =
1817  {
1822  };
1823 
1824  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1825 
1826  if (inputTensorInfo != outputTensorInfo)
1827  {
1828  throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
1829  }
1830 }
1831 
// Validates an LSTM workload. Checks, in order:
//  - exactly 3 inputs (input, output state in, cell state in) and 4 outputs
//    (scratch buffer, output state out, cell state out, output);
//  - all inputs/outputs share input_0's (supported) data type;
//  - cell/projection clipping thresholds are non-negative;
//  - every mandatory weight/bias tensor is present with the expected
//    number of dimensions and elements (sizes inferred from input_0,
//    InputToOutputWeights and RecurrentToOutputWeights);
//  - optional tensor groups (CIFG input-gate weights, peephole weights,
//    projection tensors, layer-normalisation weights) are internally
//    consistent with the corresponding descriptor flags.
// @throws InvalidArgumentException on the first violation found.
1832 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1833 {
1834  // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1835 
1836  const std::string descriptorName{"LstmQueueDescriptor"};
1837 
1838  // check dimensions of all inputs and outputs
1839  if (workloadInfo.m_InputTensorInfos.size() != 3)
1840  {
1841  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1842  }
1843  if (workloadInfo.m_OutputTensorInfos.size() != 4)
1844  {
1845  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1846  }
1847 
1848  std::vector<DataType> supportedTypes =
1849  {
1854  };
1855 
1856  // check for supported type of one input and match them with all the other input and output
1857  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1858 
1859  // type matches all other inputs
1860  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1861  {
1862  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1863  workloadInfo.m_InputTensorInfos[i],
1864  descriptorName,
1865  "input_0",
1866  "input_" + std::to_string(i));
1867  }
1868  // type matches all other outputs
1869  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1870  {
1871  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1872  workloadInfo.m_OutputTensorInfos[i],
     // NOTE(review): literal "LstmQueueDescriptor" instead of descriptorName —
     // harmless (same text) but inconsistent with the rest of this function.
1873  "LstmQueueDescriptor",
1874  "input_0",
1875  "output_" + std::to_string(i));
1876  }
1877 
1878  // Making sure clipping parameters have valid values.
1879  // == 0 means no clipping
1880  // > 0 means clipping
1881  if (m_Parameters.m_ClippingThresCell < 0.0f)
1882  {
1883  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
1884  }
1885  if (m_Parameters.m_ClippingThresProj < 0.0f)
1886  {
1887  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
1888  }
1889 
1890 
1891  // Inferring batch size, number of outputs and number of cells from the inputs.
1892  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1893  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1894  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1895  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1896  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1897  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1898 
1899  // input tensor
1900  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1901  descriptorName + " input_0");
1902  // outputStateInTensor
1903  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1904  descriptorName + " input_1");
     // cellStateInTensor (input_2 is sized n_batch * n_cell)
1906  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1907  descriptorName + " input_2");
1908  // scratchBufferTensor
     // With CIFG enabled the scratch buffer holds one fewer gate's worth of data
     // (3 * n_cell instead of 4 * n_cell).
1909  unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1910  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1911  descriptorName + " output_0");
1912  // outputStateOutTensor
1913  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1914  descriptorName + " output_1");
1915  // cellStateOutTensor
1916  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1917  descriptorName + " output_2");
1918  // outputTensor
1919  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1920  descriptorName + " output_3");
1921 
1922 
1923  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1924  if ( m_InputToInputWeights )
1925  {
1926  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
     // NOTE(review): the tensor checked here is m_InputToInputWeights but the
     // error label reads "InputLayerNormWeights" — presumably a copy/paste
     // slip producing a misleading exception message; verify upstream.
1927  (n_cell * n_input), "InputLayerNormWeights");
1928  }
1929 
1930  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1931  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1932  (n_cell * n_input), "InputToForgetWeights");
1933 
1934  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1935  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1936  (n_cell * n_input), "InputToCellWeights");
1937 
1938  if ( m_RecurrentToInputWeights )
1939  {
1940  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1941  (n_cell * n_output), "RecurrentToInputWeights");
1942  }
1943 
1944  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1945  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1946  (n_cell * n_output), "RecurrentToForgetWeights");
1947 
1948  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1949  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1950  (n_cell * n_output), "RecurrentToCellWeights");
1951 
1952  // Make sure the input-gate's parameters are either both present (regular
1953  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1954  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1955  !m_Parameters.m_CifgEnabled) ||
1956  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1957  m_Parameters.m_CifgEnabled));
1958  if (!cifg_weights_all_or_none)
1959  {
1960  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1961  "RecurrentToInputWeights must either both be present (regular LSTM) "
1962  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1963  "accordingly.");
1964  }
1965 
1966  if ( m_CellToInputWeights )
1967  {
1968  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1969  n_cell, "CellToInputWeights");
1970  }
1971  if ( m_CellToForgetWeights )
1972  {
1973  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1974  n_cell, "CellToForgetWeights");
1975  }
1976  if ( m_CellToOutputWeights )
1977  {
1978  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1979  n_cell, "CellToOutputWeights");
1980  }
1981 
1982  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
     // (CellToInputWeights is excused from the "all" case when CIFG is enabled,
     // because CIFG removes the input gate.)
1983  bool peephole_weights_all_or_none =
1984  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1985  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1986  || ( !m_CellToInputWeights && !m_CellToForgetWeights
1987  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1988  if (!peephole_weights_all_or_none)
1989  {
1990  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1991  }
1992 
1993  // Make sure the input gate bias is present only when not a CIFG-LSTM.
1994  if (m_Parameters.m_CifgEnabled)
1995  {
1996  if (m_InputGateBias)
1997  {
1998  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1999  }
2000  }
2001  else
2002  {
2003  if (!m_InputGateBias)
2004  {
2005  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
2006  "must be present.");
2007  }
2008  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
2009  n_cell, "InputGateBias");
2010  }
2011 
2012  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
2013  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
2014 
2015  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
2016  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
2017 
2018  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
2019  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
2020 
2021  if (m_ProjectionWeights)
2022  {
2023  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
2024  (n_cell * n_output), "ProjectionWeights");
2025  }
2026  if (m_ProjectionBias)
2027  {
2028  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
2029  }
2030 
2031  // Making sure the projection tensors are consistent:
2032  // 1) If projection weight is not present, then projection bias should not be
2033  // present.
2034  // 2) If projection weight is present, then projection bias is optional.
2035  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2036  !m_Parameters.m_ProjectionEnabled)
2037  || (m_ProjectionWeights && !m_ProjectionBias &&
2038  m_Parameters.m_ProjectionEnabled)
2039  || (m_ProjectionWeights && m_ProjectionBias &&
2040  m_Parameters.m_ProjectionEnabled));
2041  if (!projecton_tensors_consistent)
2042  {
2043  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
2044  }
2045 
2046  // The four layer normalization weights either all have values or none of them have values. Additionally, if
2047  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
2048  // either all have values or none of them have values. Layer normalization is used when the values of all the
2049  // layer normalization weights are present
2050  if (m_InputLayerNormWeights)
2051  {
2052  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
2053  }
2054  if (m_ForgetLayerNormWeights)
2055  {
2056  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2057  }
2058  if (m_CellLayerNormWeights)
2059  {
2060  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2061  }
2062  if (m_OutputLayerNormWeights)
2063  {
2064  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2065  }
2066 
2067  if (m_Parameters.m_LayerNormEnabled)
2068  {
2069  if (!m_Parameters.m_CifgEnabled)
2070  {
2071  if (!m_InputLayerNormWeights)
2072  {
2073  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
2074  "disabled but InputLayerNormWeights are not present");
2075  }
2076  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2077  1, n_cell, "InputLayerNormWeights");
2078  }
2079  else if (m_InputLayerNormWeights)
2080  {
2081  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
2082  "enabled");
2083  }
2084 
2085  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2086  "ForgetLayerNormWeights");
2087  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2088 
2089  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2090  "OutputLayerNormWeights");
2091  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2092 
2093  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2094  "CellLayerNormWeights");
2095  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2096  }
2097  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2098  {
2099  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2100  "normalisation weights are present.");
2101  }
2102 }
2103 
2105 {
2106  const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
2107 
2108  ValidateNumInputs(workloadInfo, descriptorName, 1);
2109  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2110 
2111  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2112  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2113 
2114  if (inputTensorInfo.GetDataType() != DataType::BFloat16)
2115  {
2116  throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
2117  }
2118 
2119  if (outputTensorInfo.GetDataType() != DataType::Float32)
2120  {
2121  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2122  }
2123 
2124  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2125 }
2126 
2128 {
2129  const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
2130 
2131  ValidateNumInputs(workloadInfo, descriptorName, 1);
2132  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2133 
2134  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2135  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2136 
2137  if (inputTensorInfo.GetDataType() != DataType::Float32)
2138  {
2139  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2140  }
2141 
2142  if (outputTensorInfo.GetDataType() != DataType::BFloat16)
2143  {
2144  throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
2145  }
2146 
2147  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2148 }
2149 
2151 {
2152  const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
2153 
2154  ValidateNumInputs(workloadInfo, descriptorName, 1);
2155  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2156 
2157  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2158  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2159 
2160  if (inputTensorInfo.GetDataType() != DataType::Float32)
2161  {
2162  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2163  }
2164 
2165  if (outputTensorInfo.GetDataType() != DataType::Float16)
2166  {
2167  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
2168  }
2169 
2170  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2171 }
2172 
2174 {
2175  const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
2176 
2177  ValidateNumInputs(workloadInfo, descriptorName, 1);
2178  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2179 
2180  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2181  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2182 
2183  if (inputTensorInfo.GetDataType() != DataType::Float16)
2184  {
2185  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
2186  }
2187 
2188  if (outputTensorInfo.GetDataType() != DataType::Float32)
2189  {
2190  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2191  }
2192 
2193  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2194 }
2195 
// Validates an element-wise Division workload: two inputs and one output,
// all three tensors in the supported data-type set, and the input shapes
// broadcast-compatible with the output shape.
// @throws InvalidArgumentException on any violation.
2196 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2197 {
2198  const std::string descriptorName{"DivisionQueueDescriptor"};
2199 
2200  ValidateNumInputs(workloadInfo, descriptorName, 2);
2201  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2202 
2203  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2204  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2205  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2206 
2207  std::vector<DataType> supportedTypes =
2208  {
2216  };
2217 
2218  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2219  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2220  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2221 
2222  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2223  inputTensorInfo1,
2224  outputTensorInfo,
2225  descriptorName,
2226  "input_0",
2227  "input_1");
2228 }
2229 
2231 {
     // Validates an element-wise Subtraction workload: two inputs, one
     // output, supported data types, broadcast-compatible shapes.
     // @throws InvalidArgumentException on any violation.
2232  const std::string descriptorName{"SubtractionQueueDescriptor"};
2233 
2234  ValidateNumInputs(workloadInfo, descriptorName, 2);
2235  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2236 
2237  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2238  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2239  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2240 
2241  std::vector<DataType> supportedTypes =
2242  {
2250  };
2251 
2252  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2253  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2254  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2255 
2256  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2257  inputTensorInfo1,
2258  outputTensorInfo,
2259  descriptorName,
2260  "input_0",
2261  "input_1");
2262 }
2263 
// Validates an element-wise Maximum workload: two inputs and one output,
// supported data types on all three tensors, broadcast-compatible shapes.
// @throws InvalidArgumentException on any violation.
2264 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2265 {
2266  const std::string descriptorName{"MaximumQueueDescriptor"};
2267 
2268  ValidateNumInputs(workloadInfo, descriptorName, 2);
2269  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2270 
2271  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2272  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2273  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2274 
2275  std::vector<DataType> supportedTypes =
2276  {
2284  };
2285 
2286  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2287  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2288  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2289 
2290  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2291  inputTensorInfo1,
2292  outputTensorInfo,
2293  descriptorName,
2294  "input_0",
2295  "input_1");
2296 }
2297 
// Validates a Mean (reduction) workload: one input, one output, matching
// supported data types, and an output rank consistent with the KeepDims /
// Axis reduction parameters.
// @throws InvalidArgumentException on any violation.
2298 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2299 {
2300  const std::string descriptorName{"MeanQueueDescriptor"};
2301 
2302  ValidateNumInputs(workloadInfo, descriptorName, 1);
2303  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2304 
2305  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2306  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2307 
2308  std::vector<DataType> supportedTypes =
2309  {
2316  };
2317 
2318  // First check if input tensor data type is supported, then
2319  // check if this data type matches the output tensor data type
2320  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2321  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2322 
     // KeepDims: reduced axes are retained with size 1, so the rank is unchanged.
2323  if (m_Parameters.m_KeepDims)
2324  {
2325  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2326  }
     // No axes given: everything is reduced down to a single dimension.
2327  else if (m_Parameters.m_Axis.empty())
2328  {
2329  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
2330  }
2331  else
2332  {
     // Each reduced axis removes one dimension; clamp to a minimum rank of 1.
2333  unsigned int outputDim =
2334  inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
2335  ValidateTensorNumDimensions(outputTensorInfo,
2336  descriptorName,
2337  outputDim > 0 ? outputDim : 1,
2338  "output");
2339  }
2340 }
2341 
2342 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2343 {
2344  const std::string descriptorName{"PadQueueDescriptor"};
2345 
2346  ValidateNumInputs(workloadInfo, descriptorName, 1);
2347  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2348 
2349  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2350  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2351 
2352  // input and output should have the same number of dimensions
2353  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2354 
2355  // there should be entry in the pad list for each dimension in the input tensor
2356  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2357  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2358  "as there are dimensions in the input tensor that is " +
2359  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2360  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2361  }
2362 }
2363 
// Validates a Quantize workload: one input of a supported type and one
// output whose data type must be one of the quantized types.
// @throws InvalidArgumentException on any violation.
2364 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2365 {
2366  const std::string descriptorName{"QuantizeQueueDescriptor"};
2367 
2368  ValidateNumInputs(workloadInfo, descriptorName, 1);
2369  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2370 
2371  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2372  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2373 
2374  std::vector<DataType> supportedTypes =
2375  {
2383  };
2384 
2385  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2386 
     // Only the output's "quantized-ness" is checked; shape is not validated here.
2387  if (!IsQuantizedType(outputTensorInfo.GetDataType()))
2388  {
2389  throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
2390  }
2391 }
2392 
2394 {
     // Validates a BatchToSpaceNd workload: one input, one output, input type
     // in the supported set and matching the output type.
     // @throws InvalidArgumentException on any violation.
2395  const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
2396 
2397  ValidateNumInputs(workloadInfo, descriptorName, 1);
2398  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2399 
2400  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2401  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2402 
2403  std::vector<DataType> supportedTypes =
2404  {
2411  };
2412 
2413  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2414  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2415 }
2416 
2418 {
     // Validates a StridedSlice workload: one input (rank <= 4), one output,
     // matching supported data types and quantization spaces, Begin/End/Stride
     // lists one entry per input dimension, and no zero strides.
     // @throws InvalidArgumentException on any violation.
2419  const std::string descriptorName{"StridedSliceQueueDescriptor"};
2420 
2421  ValidateNumInputs(workloadInfo, descriptorName, 1);
2422  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2423 
2424  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2425  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2426 
2427  std::vector<DataType> supportedTypes =
2428  {
2435  };
2436 
2437  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2438  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2439 
     // Slicing must not change quantization scale/offset between input and output.
2440  ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2441 
2442  const uint32_t rank = inputTensorInfo.GetNumDimensions();
2443  if (rank > 4)
2444  {
2445  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
2446  }
2447 
2448  // Begin, End & Stride length must be of rank(input0)
2449  if (m_Parameters.m_Begin.size() != rank)
2450  {
2451  throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
2452  }
2453 
2454  if (m_Parameters.m_End.size() != rank)
2455  {
2456  throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
2457  }
2458 
2459  if (m_Parameters.m_Stride.size() != rank)
2460  {
2461  throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
2462  }
2463 
2464  // Stride entries must be non-zero
2465  for (auto& stride : m_Parameters.m_Stride)
2466  {
2467  if (stride == 0)
2468  {
2469  throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2470  }
2471  }
2472 }
2473 
// Validates an element-wise Minimum workload: two inputs and one output,
// supported data types on all three tensors, broadcast-compatible shapes.
// @throws InvalidArgumentException on any violation.
2474 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2475 {
2476  const std::string descriptorName{"MinimumQueueDescriptor"};
2477 
2478  ValidateNumInputs(workloadInfo, descriptorName, 2);
2479  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2480 
2481  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2482  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2483  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2484 
2485  std::vector<DataType> supportedTypes =
2486  {
2494  };
2495 
2496  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2497  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2498  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2499 
2500  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2501  inputTensorInfo1,
2502  outputTensorInfo,
2503  descriptorName,
2504  "input_0",
2505  "input_1");
2506 }
2507 
2508 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2509 {
2510  const std::string descriptorName{"DebugQueueDescriptor"};
2511 
2512  ValidateNumInputs(workloadInfo, descriptorName, 1);
2513  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2514 }
2515 
2516 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2517 {
2518  const std::string descriptorName{"EqualQueueDescriptor"};
2519 
2520  ValidateNumInputs(workloadInfo, descriptorName, 2);
2521  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2522 
2523  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2524  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2525  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2526 
2527  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2528  inputTensorInfo1,
2529  outputTensorInfo,
2530  descriptorName,
2531  "input_0",
2532  "input_1");
2533 
2534  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2535  {
2536  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2537  }
2538 }
2539 
2540 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2541 {
2542  const std::string descriptorName{"GreaterQueueDescriptor"};
2543 
2544  ValidateNumInputs(workloadInfo, descriptorName, 2);
2545  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2546 
2547  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2548  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2549  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2550 
2551  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2552  inputTensorInfo1,
2553  outputTensorInfo,
2554  descriptorName,
2555  "input_0",
2556  "input_1");
2557 
2558  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2559  {
2560  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2561  }
2562 }
2563 
// Validates an Rsqrt (reciprocal square root) workload: one input, one
// output, identical shapes, input type in the supported set and matching
// the output type.
// @throws InvalidArgumentException on any violation.
2564 void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2565 {
2566  const std::string descriptorName{"RsqrtQueueDescriptor"};
2567 
2568  ValidateNumInputs(workloadInfo, descriptorName, 1);
2569  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2570 
2571  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2572  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2573 
2574  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2575 
2576  std::vector<DataType> supportedTypes =
2577  {
2584  };
2585 
2586  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2587  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2588 }
2589 
// Validates a Gather workload: two inputs (data plus Signed32 indices) and
// one output with data type matching the data input and rank equal to
// rank(data) + rank(indices) - 1.
// @throws InvalidArgumentException on any violation.
2590 void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2591 {
2592  const std::string descriptorName{"GatherQueueDescriptor"};
2593 
2594  ValidateNumInputs(workloadInfo, descriptorName, 2);
2595  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2596 
     // Indices are input_1 and must be 32-bit signed integers.
2597  const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2598  if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2599  {
2600  throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2601  }
2602 
2603  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2604  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2605 
2606  std::vector<DataType> supportedTypes =
2607  {
2615  };
2616 
2617  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2618 
2619  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2620 
     // Gather replaces the indexed axis of the data tensor with the full
     // shape of the indices tensor, hence rank(in) + rank(indices) - 1.
2621  unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2622  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2623 }
2624 
2626 {
     // Validates a DetectionPostProcess workload: two inputs (box encodings,
     // scores), an anchors constant tensor, and exactly four Float32 outputs
     // (detection boxes/classes/scores and the detection count). Also checks
     // the NMS IoU threshold range and the scores' class dimension.
     // @throws InvalidArgumentException on any violation.
     // NOTE(review): descriptorName is a const reference bound to a braced
     // temporary — lifetime-extended and legal, but a plain value would be
     // clearer; verify intent.
2627  const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2628 
2629  ValidateNumInputs(workloadInfo, descriptorName, 2);
2630 
2631  if (workloadInfo.m_OutputTensorInfos.size() != 4)
2632  {
2633  throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2634  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2635  }
2636 
2637  if (m_Anchors == nullptr)
2638  {
2639  throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2640  }
2641 
2642  const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
2643  const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2644  const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2645 
2646  const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
2647  const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2648  const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2649  const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
2650 
2651  ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2652  ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2653  ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2654 
2655  const std::vector<DataType> supportedInputTypes =
2656  {
2663  };
2664 
2665  ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2666  ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2667  ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2668 
2669  ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2670  ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2671  ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2672  ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2673 
2674  // NOTE: Output is always Float32 regardless of input type
2675  ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2676  ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2677  ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2678  ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
2679 
     // IoU threshold must lie in (0, 1].
2680  if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2681  {
2682  throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2683  "must be positive and less than or equal to 1.");
2684  }
2685 
     // Last dimension of the scores tensor includes the background class.
2686  if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2687  {
2688  throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2689  "should be equal to number of classes + 1.");
2690  }
2691 }
2692 
2693 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2694 {
 // Validates a Dequantize workload: exactly one input and one output; the
 // input must carry a quantized data type, and the output's data type must
 // be one of the supported (non-quantized) types listed below.
2695  const std::string& descriptorName{"DequantizeQueueDescriptor"};
2696 
2697  ValidateNumInputs(workloadInfo, descriptorName, 1);
2698  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2699 
2700  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2701  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2702 
 // Dequantize is only meaningful on quantized data, so reject any
 // non-quantized input type up front.
2703  if (!IsQuantizedType(inputTensorInfo.GetDataType()))
2704  {
2705  throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
2706  }
2707 
2708  std::vector<DataType> supportedTypes =
2709  {
2713  };
2714 
 // Only the output type is checked against the supported list; the input was
 // already constrained to a quantized type above.
2715  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2716 }
2717 
2718 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2719 {
2720  const std::string& descriptorName{"MergeQueueDescriptor"};
2721 
2722  ValidateNumInputs(workloadInfo, descriptorName, 2);
2723  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2724 
2725  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2726  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2727  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2728 
2729  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2730  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2731 
2732  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2733  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2734 }
2735 
2736 void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2737 {
 // Validates a Switch workload: two inputs and two outputs are required,
 // every tensor's data type must be in the supported set, and each output
 // must have the same shape as the first input.
2738  const std::string& descriptorName{"SwitchQueueDescriptor"};
2739 
2740  ValidateNumInputs(workloadInfo, descriptorName, 2);
2741  ValidateNumOutputs(workloadInfo, descriptorName, 2);
2742 
2743  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2744  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2745 
2746  const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2747  const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2748 
2749  std::vector<DataType> supportedTypes =
2750  {
2756  };
2757 
 // All four tensors (both inputs, both outputs) must use a supported type.
2758  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2759  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2760 
2761  ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2762  ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2763 
 // Both outputs must mirror input_0's shape; input_1's shape is not
 // compared against the outputs here.
2764  ValidateTensorShapesMatch(inputTensorInfo0,
2765  outputTensorInfo0,
2766  descriptorName,
2767  "input_0",
2768  "output_0");
2769 
2770  ValidateTensorShapesMatch(inputTensorInfo0,
2771  outputTensorInfo1,
2772  descriptorName,
2773  "input_0",
2774  "output_1");
2775 }
2776 
2777 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
2778 {
2779  // This is internally generated so it should not need validation.
2780 }
2781 
2782 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2783 {
 // Validates a Prelu workload: two inputs (data and per-channel alpha) and
 // one output; all three must use a supported, mutually matching data type,
 // and input/alpha must broadcast together to the output shape.
2784  const std::string& descriptorName{"PreluQueueDescriptor"};
2785 
2786  ValidateNumInputs(workloadInfo, descriptorName, 2);
2787  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2788 
2789  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2790  const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2791  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2792 
2793  std::vector<DataType> supportedTypes
2794  {
2801  };
2802 
2803  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2804  ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2805 
2806  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2807 
2808  ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
 // Fix: error label previously read "ouptut"; corrected to "output".
2809  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2810 
 // Alpha is broadcast against the input, so shapes are checked with the
 // broadcast-aware helper rather than a strict equality check.
2811  ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2812  alphaTensorInfo,
2813  outputTensorInfo,
2814  descriptorName,
2815  "input",
2816  "alpha");
2817 }
2818 
 // TransposeConvolution2dQueueDescriptor::Validate (signature precedes this
 // brace): checks one 4D input and one 4D output, a mandatory 4D weight
 // tensor, an optional bias, per-axis quantization consistency, and that
 // input/output share a supported data type.
2820 {
2821  const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2822 
2823  ValidateNumInputs(workloadInfo, descriptorName, 1);
2824  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2825 
2826  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2827  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2828 
2829  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2830  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2831 
 // Weights are mandatory and must be a 4D tensor whose data type is
 // compatible with the input's.
2832  ValidatePointer(m_Weight, descriptorName, "weight");
2833 
2834  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2835  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2836 
2837  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2838 
 // Bias is optional; it is only validated (type and quantization derived
 // from the input/weight tensors) when the descriptor enables it.
2839  Optional<TensorInfo> optionalBiasTensorInfo;
2840  if (m_Parameters.m_BiasEnabled)
2841  {
2842  ValidatePointer(m_Bias, descriptorName, "bias");
2843 
2844  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2845  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
2846 
2847  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
2848  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2849  }
2850 
2851  ValidatePerAxisQuantization(inputTensorInfo,
2852  outputTensorInfo,
2853  weightTensorInfo,
2854  optionalBiasTensorInfo,
2855  descriptorName);
2856 
2857  std::vector<DataType> supportedTypes =
2858  {
2865  };
2866 
2867  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2868  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2869 }
2870 
2871 void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2872 {
2873  const std::string descriptorName{"TransposeQueueDescriptor"};
2874 
2875  ValidateNumInputs(workloadInfo, descriptorName, 1);
2876  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2877 
2878  const PermutationVector& mapping = m_Parameters.m_DimMappings;
2879 
2880  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2881  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2882 
2883  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
2884  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
2885 
2886  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
2887  {
2888  if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
2889  {
2890  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
2891  " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
2892  "must match dst dimension " + to_string(i) +
2893  " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
2894  }
2895  }
2896 
2897  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2898 }
2899 
2900 void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2901 {
2902  const std::string descriptorName{"QLstmQueueDescriptor"};
2903 
2904  // Validate number of inputs/outputs
2905  ValidateNumInputs(workloadInfo, descriptorName, 3);
2906  ValidateNumOutputs(workloadInfo, descriptorName, 3);
2907 
2908  // Input/output tensor info
2909  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2910  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
2911  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];
2912 
2913  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2914  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2915  auto outputInfo = workloadInfo.m_OutputTensorInfos[2];
2916 
2917  // Supported types for various tensors in QLSTM
2918  std::vector<DataType> inputOutputSupportedTypes =
2919  {
2921  };
2922 
2923  std::vector<DataType> cellStateSupportedTypes =
2924  {
2926  };
2927 
2928  std::vector<DataType> weightsSupportedTypes =
2929  {
2931  };
2932 
2933  std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
2934  {
2936  };
2937 
2938  std::vector<DataType> biasSupportedTypes =
2939  {
2941  };
2942 
2943  // Validate types of input/output tensors
2944  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2945  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2946  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2947 
2948  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2949  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2950  ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);
2951 
2952  // Validate matching types of input/output tensors
2953  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2954  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2955  "outputStateIn", "outputStateOut");
2956  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2957 
2958  // Infer number of batches, number of units, input size and output size from tensor dimensions
2959  const uint32_t numBatches = inputInfo.GetShape()[0];
2960  const uint32_t inputSize = inputInfo.GetShape()[1];
2961  const uint32_t outputSize = outputStateInInfo.GetShape()[1];
2962  const uint32_t numUnits = cellStateInInfo.GetShape()[1];
2963 
2964  // Validate number of dimensions and number of elements for input/output tensors
2965  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2966  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2967  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");
2968 
2969  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2970  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
2971  ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");
2972 
2973  // Validate number of dimensions and number of elements for MANDATORY weight tensors
2974  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2975  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2976  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");
2977 
2978  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2979  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2980  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");
2981 
2982  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2983  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2984  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");
2985 
2986  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2987  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2988  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
2989  " RecurrentToForgetWeights");
2990 
2991  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2992  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2993  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");
2994 
2995  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2996  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2997  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");
2998 
2999  // Validate data types for MANDATORY weights tensors (all should match each other)
3000  ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);
3001 
3002  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
3003  "inputToForgetWeights", "inputToCellWeights");
3004  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3005  "inputToForgetWeights", "inputToOutputWeights");
3006 
3007  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3008  "inputToForgetWeights", "recurrentToForgeteights");
3009  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3010  "inputToForgetWeights", "recurrentToCellWeights");
3011  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3012  "inputToForgetWeights", "recurrentToOutputWeights");
3013 
3014  // Validate number of dimensions and number of elements for MANDATORY bias tensors
3015  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3016  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3017  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");
3018 
3019  ValidatePointer(m_CellBias, descriptorName, "CellBias");
3020  auto cellBiasInfo = m_CellBias->GetTensorInfo();
3021  ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");
3022 
3023  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3024  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3025  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");
3026 
3027  // Validate data types for MANDATORY bias tensors
3028  ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);
3029 
3030  ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
3031  "forgetGateBias", "cellBias");
3032  ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
3033  "forgetGateBias", "outputGateBias");
3034 
3035  // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
3036  const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
3037  !m_Parameters.m_CifgEnabled) ||
3038  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3039  !m_InputGateBias && m_Parameters.m_CifgEnabled));
3040 
3041  if (!allCifgParamsPresentOrNot)
3042  {
3043  throw InvalidArgumentException(descriptorName +
3044  ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
3045  "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
3046  "set appropriately.");
3047  }
3048 
3049  if (!m_Parameters.m_CifgEnabled)
3050  {
3051  // Validate number of dimensions and number of elements
3052  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3053  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");
3054 
3055  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3056  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
3057  " RecurrentToInputWeights");
3058 
3059  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3060  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");
3061 
3062  // Validate data types
3063  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
3064  "inputToForgetWeights", "inputToInputWeights");
3065  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3066  "inputToForgetWeights", "recurrentToInputWeights");
3067  ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
3068  "forgetGateBias", "inputGateBias");
3069  }
3070 
3071  // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
3072  bool allPeepholeWeightsPresentOrNot =
3073  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3074  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3075  || (!m_CellToInputWeights && !m_CellToForgetWeights
3076  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3077 
3078  if (!allPeepholeWeightsPresentOrNot)
3079  {
3080  throw InvalidArgumentException(descriptorName +
3081  ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
3082  "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
3083  "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
3084  "appropriately.");
3085  }
3086 
3087  if (m_Parameters.m_PeepholeEnabled)
3088  {
3089  auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
3090  ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
3091  ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3092 
3093  auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
3094  ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
3095  ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
3096  "cellToForgetWeight", "cellToOutputWeights");
3097 
3098  if (!m_Parameters.m_CifgEnabled)
3099  {
3100  auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
3101  ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
3102  ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
3103  "cellToForgetWeights", "cellToInputWeights");
3104  }
3105  }
3106 
3107  // Validate OPTIONAL params: Layer Norm Weights
3108  bool allLayerNormWeightsPresentOrNot =
3109  (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
3110  && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
3111  || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
3112  && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));
3113 
3114  if (!allLayerNormWeightsPresentOrNot)
3115  {
3116  throw InvalidArgumentException(descriptorName +
3117  ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights "
3118  "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
3119  "be present at all (Layer Norm disabled). InputLayerNormWeights should "
3120  "only be present when Layer Norm is enabled and CIFG is disabled. "
3121  "m_Parameters.m_LayerNormEnabled should be set appropriately.");
3122  }
3123 
3124  if (m_Parameters.m_LayerNormEnabled)
3125  {
3126  auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
3127  ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
3128  ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3129 
3130  auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
3131  ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
3132  ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
3133  "forgetLayerNormWeights", "cellLayerNormWeights");
3134 
3135  auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
3136  ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
3137  ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
3138  "forgetLayerNormWeights", "outputLayerNormWeights");
3139 
3140  if (!m_Parameters.m_CifgEnabled)
3141  {
3142  auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
3143  ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
3144  ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
3145  "forgetLayerNormWeights", "inputLayerNormWeights");
3146  }
3147  }
3148 
3149  // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
3150  bool correctProjectionTensorsPresent =
3151  ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
3152  (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
3153  (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));
3154 
3155  if (!correctProjectionTensorsPresent)
3156  {
3157  throw InvalidArgumentException(descriptorName +
3158  ": If projection is enabled, ProjectionWeights should be present and "
3159  "ProjectionBias is optional. If projection is disabled, neither "
3160  "ProjectionWeights nor ProjectionBias should be present.");
3161  }
3162 
3163  if (m_Parameters.m_ProjectionEnabled)
3164  {
3165  auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
3166  ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
3167  ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);
3168 
3169  if (m_ProjectionBias)
3170  {
3171  auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
3172  ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
3173  ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
3174  }
3175 
3176  }
3177  else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
3178  outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
3179  throw InvalidArgumentException(descriptorName +
3180  ": If projection is disabled, output quantization info (scale, offset) "
3181  "should match HiddenStateScale and HiddenStateZeroPoint.");
3182  }
3183 
3184 }
3185 
3187 {
3188  const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
3189 
3190  // Validate number of inputs/outputs
3191  ValidateNumInputs(workloadInfo, descriptorName, 3);
3192  ValidateNumOutputs(workloadInfo, descriptorName, 2);
3193 
3194  // Input/output tensor infos
3195  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3196  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
3197  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
3198 
3199  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3200  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3201 
3202  std::vector<DataType> inputOutputSupportedTypes =
3203  {
3205  };
3206 
3207  std::vector<DataType> cellStateSupportedTypes =
3208  {
3210  };
3211 
3212  std::vector<DataType> weightsSupportedTypes =
3213  {
3215  };
3216 
3217  std::vector<DataType> biasSupportedTypes =
3218  {
3220  };
3221 
3222  // Validate types of input/output tensors
3223  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3224  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3225  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3226 
3227  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3228  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3229 
3230  // Validate matching types of input/output tensors
3231  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3232  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3233  "outputStateIn", "outputStateOut");
3234  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3235 
3236  // Validate matching quantization info for input/output tensors
3237  ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3238  ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
3239  ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3240 
3241  // Infer number of batches, input size and output size from tensor dimensions
3242  const uint32_t numBatches = inputInfo.GetShape()[0];
3243  const uint32_t inputSize = inputInfo.GetShape()[1];
3244  const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3245 
3246  // Validate number of dimensions and number of elements for input/output tensors
3247  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3248  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
3249  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3250  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
3251  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3252 
3253  // Validate number of dimensions and number of elements for weights tensors
3254  ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
3255  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3256  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
3257 
3258  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3259  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3260  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
3261 
3262  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3263  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3264  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
3265 
3266  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3267  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3268  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
3269 
3270  ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
3271  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3272  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
3273 
3274  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3275  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3276  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3277  " RecurrentToForgetWeights");
3278 
3279  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3280  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3281  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3282 
3283  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3284  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3285  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3286 
3287  // Validate data types for weights tensors (all should match each other)
3288  ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3289 
3290  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3291  "inputToInputWeights", "inputToForgetWeights");
3292  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3293  "inputToInputWeights", "inputToCellWeights");
3294  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3295  "inputToInputWeights", "inputToOutputWeights");
3296 
3297  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3298  "inputToInputWeights", "recurrentToInputWeights");
3299  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3300  "inputToInputWeights", "recurrentToForgeteights");
3301  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3302  "inputToInputWeights", "recurrentToCellWeights");
3303  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3304  "inputToInputWeights", "recurrentToOutputWeights");
3305 
3306  // Validate matching quantization info for weight tensors (all should match each other)
3307  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3308  descriptorName, "inputToInputWeights", "inputToForgetWeights");
3309  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3310  descriptorName, "inputToInputWeights", "inputToCellWeights");
3311  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3312  descriptorName, "inputToInputWeights", "inputToOutputWeights");
3313 
3314  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3315  descriptorName, "inputToInputWeights", "recurrentToInputWeights");
3316  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3317  descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
3318  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3319  descriptorName, "inputToInputWeights", "recurrentToCellWeights");
3320  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3321  descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
3322 
3323  // Validate number of dimensions and number of elements in bias tensors
3324  ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
3325  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3326  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
3327 
3328  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3329  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3330  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
3331 
3332  ValidatePointer(m_CellBias, descriptorName, "CellBias");
3333  auto cellBiasInfo = m_CellBias->GetTensorInfo();
3334  ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
3335 
3336  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3337  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3338  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
3339 
3340  // Validate data types for bias tensors (all should match each other)
3341  ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3342 
3343  ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3344  "inputGateBias", "forgetGateBias");
3345  ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3346  "inputGateBias", "cellBias");
3347  ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3348  "inputGateBias", "outputGateBias");
3349 
3350  // Validate bias tensor quantization info
3351  ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3352  ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3353  ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3354  ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3355 }
3356 
3357 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3358 {
3359  const std::string descriptorName{"AbsQueueDescriptor"};
3360 
3361  ValidateNumInputs(workloadInfo, descriptorName, 1);
3362  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3363 
3364  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3365  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3366 
3367  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3368 
3369  std::vector<DataType> supportedTypes =
3370  {
3378  };
3379 
3380  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3381  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3382 }
3383 
3384 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3385 {
3386  const std::string descriptorName{"SliceQueueDescriptor"};
3387 
3388  ValidateNumInputs(workloadInfo, descriptorName, 1);
3389  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3390 
3391  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3392  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3393 
3394  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3395 
3396  const unsigned int rank = inputTensorInfo.GetNumDimensions();
3397  if (rank > 4)
3398  {
3399  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3400  }
3401 
3402  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3403 
3404  // Check if m_Begin and m_Size have the expected length
3405  if (m_Parameters.m_Begin.size() != rank)
3406  {
3407  throw InvalidArgumentException(descriptorName +
3408  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3409  }
3410  if (m_Parameters.m_Size.size() != rank)
3411  {
3412  throw InvalidArgumentException(descriptorName +
3413  ": Length of size descriptor must equal rank " + std::to_string(rank));
3414  }
3415 
3416  // Check if the shape of the output tensor matches m_Size
3417  const TensorShape& outputShape = outputTensorInfo.GetShape();
3418  for (unsigned int i = 0u; i < rank; ++i)
3419  {
3420  if (m_Parameters.m_Size[i] != outputShape[i])
3421  {
3422  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3423  }
3424  }
3425 
3426  // Check if the sum of begin offset and size in a given dimension
3427  // does not exceed the size of corresponding input
3428  const TensorShape& inputShape = inputTensorInfo.GetShape();
3429  for(unsigned int i = 0u; i < rank; ++i)
3430  {
3431  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3432  {
3433  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3434  std::to_string(i) + " exceeds input size.");
3435  }
3436  }
3437 }
3438 
3440 {
3441  const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3442 
3443  ValidateNumInputs(workloadInfo, descriptorName, 1);
3444  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3445 
3446  const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3447  const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3448 
3449  ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3450  ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3451 
3452  std::vector<DataType> supportedTypes =
3453  {
3460  };
3461 
3462  ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3463  ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3464 
3465  ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3466 
3467  if (m_Parameters.m_BlockSize == 0)
3468  {
3469  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3470  }
3471 
3472  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3473  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3474  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3475  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3476 
3477  const TensorShape& outputShape = outputInfo.GetShape();
3478  if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3479  {
3480  throw InvalidArgumentException(descriptorName + ": Output width and height shape"
3481  "must be divisible by block size.");
3482  }
3483 
3484  const TensorShape& inputShape = inputInfo.GetShape();
3485  if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3486  {
3487  throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
3488  "must be divisible by the square of block size." );
3489  }
3490 }
3491 
3492 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3493 {
3494  const std::string descriptorName{"ComparisonQueueDescriptor"};
3495 
3496  ValidateNumInputs(workloadInfo, descriptorName, 2);
3497  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3498 
3499  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3500  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3501  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3502 
3503  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3504  inputTensorInfo1,
3505  outputTensorInfo,
3506  descriptorName,
3507  "input_0",
3508  "input_1");
3509 
3510  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3511  {
3512  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3513  }
3514 }
3515 
3517 {
3518  const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3519 
3520  ValidateNumInputs(workloadInfo, descriptorName, 1);
3521  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3522 
3523  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3524  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3525 
3526  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3527 
3528  std::vector<DataType> supportedTypes =
3529  {
3537  };
3538 
3539  std::vector<DataType> logicalSupportedTypes =
3540  {
3542  };
3543 
3544  if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
3545  {
3546  ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
3547  }
3548  else
3549  {
3550  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3551  }
3552 
3553 
3554  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3555 }
3556 
3557 void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3558 {
3559  const std::string descriptorName{"RankQueueDescriptor"};
3560 
3561  ValidateNumInputs(workloadInfo, descriptorName, 1);
3562  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3563 
3564  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3565  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3566 
3567  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
3568  ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
3569 
3570  std::vector<DataType> supportedTypes =
3571  {
3580  };
3581 
3582  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3583  ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
3584 }
3585 
3587 {
3588  const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
3589 
3590  ValidateNumInputs(workloadInfo, descriptorName, 2);
3591  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3592 
3593  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3594  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3595  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3596 
3597  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3598  inputTensorInfo1,
3599  outputTensorInfo,
3600  descriptorName,
3601  "input_0",
3602  "input_1");
3603 
3604  if (inputTensorInfo0.GetDataType() != DataType::Boolean)
3605  {
3606  throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
3607  }
3608 
3609  if (inputTensorInfo1.GetDataType() != DataType::Boolean)
3610  {
3611  throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
3612  }
3613 
3614  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3615  {
3616  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3617  }
3618 }
3619 
3620 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:423
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:249
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:437
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:485
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2020 ARM Limited.
void Validate(const WorkloadInfo &workloadInfo) const
SizeType GetSize() const
Definition: Types.hpp:233
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:442
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:197
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:180
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:254
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:32
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
float GetQuantizationScale() const
Definition: Tensor.cpp:452
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
Definition: Tensor.hpp:194
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about inputs and outputs to a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191
unsigned int GetChannelsIndex() const
bool IsQuantized() const
Definition: Tensor.cpp:495
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:192
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin