ArmNN
 21.05
WorkloadData.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
11 #include <armnn/Logging.hpp>
12 
13 #include <algorithm>
14 #include <iomanip>
15 #include <string>
16 #include <sstream>
17 
18 #include <fmt/format.h>
19 
20 using namespace armnnUtils;
21 
22 namespace armnn
23 {
24 
25 //---------------------------------------------------------------
27 {
28  switch (inputDataType)
29  {
30  case DataType::Float16:
31  return DataType::Float16;
32  case DataType::BFloat16:
33  case DataType::Float32:
34  return DataType::Float32;
35  case DataType::QAsymmS8:
36  return DataType::Signed32;
37  case DataType::QAsymmU8:
38  return DataType::Signed32;
39  case DataType::QSymmS8:
40  return DataType::Signed32;
41  case DataType::QSymmS16:
42  return DataType::Signed32;
43  default:
44  ARMNN_ASSERT_MSG(false, "Invalid input data type");
45  return DataType::Float32;
46  }
47 }
48 
49 namespace
50 {
51 
//---------------------------------------------------------------
// Local replacement for std::to_string, which the Android NDK does not
// provide; streams any insertable value into a string.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
61 
62 //---------------------------------------------------------------
63 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
64 {
65  if (!ptr)
66  {
67  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
68  paramName + " parameter must be set.");
69  }
70 }
71 
72 //---------------------------------------------------------------
73 void ValidateTensorShapesMatch(const TensorInfo& first,
74  const TensorInfo& second,
75  std::string const& descName,
76  std::string const& firstName,
77  std::string const& secondName)
78 {
79  if (first.GetShape() != second.GetShape())
80  {
81  throw InvalidArgumentException(descName + ": "
82  + firstName + " & " + secondName + " must have identical shapes");
83  }
84 }
85 
86 //---------------------------------------------------------------
87 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
88 {
89  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
90  {
91  throw InvalidArgumentException(descName +
92  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
93  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
94  }
95 }
96 
97 //---------------------------------------------------------------
98 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
99 {
100  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
101  {
102  throw InvalidArgumentException(descName +
103  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
104  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
105  }
106 }
107 
108 //---------------------------------------------------------------
109 void ValidateTensorNumDimensions(const TensorInfo& tensor,
110  std::string const& descName,
111  unsigned int numDimensions,
112  std::string const& tensorName)
113 {
114  if (tensor.GetNumDimensions() != numDimensions)
115  {
116  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
117  to_string(tensor.GetNumDimensions()) + " dimensions for " +
118  tensorName + " tensor.");
119  }
120 }
121 
122 //---------------------------------------------------------------
123 void ValidateTensorNumElements(const TensorInfo& tensor,
124  std::string const& descName,
125  unsigned int numElements,
126  std::string const& tensorName)
127 {
128  if (tensor.GetNumElements() != numElements)
129  {
130  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
131  to_string(tensor.GetNumElements()) + " elements for " +
132  tensorName + " tensor.");
133  }
134 }
135 
136 //---------------------------------------------------------------
137 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
138  unsigned int numDimension,
139  unsigned int numElements,
140  std::string const& tensorName)
141 {
142  const std::string functionName{"ValidateTensorNumDimNumElem"};
143  ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
144  ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
145 }
146 
147 //---------------------------------------------------------------
148 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
149  const std::string& descName, std::string const& tensorName)
150 {
151  if (tensor.GetDataType() != dataType)
152  {
153  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
154  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
155  }
156 }
157 
// Verifies that the tensor uses a data type capable of carrying per-axis
// quantization parameters: QSymmS8 or the deprecated QuantizedSymm8PerAxis.
// NOTE(review): the upstream file wraps this check in
// ARMNN_NO_DEPRECATE_WARN_BEGIN/END; those macro lines are folded out of
// this capture — confirm against the original source before rebuilding.
void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
{
    if (tensor.GetDataType() != DataType::QSymmS8 &&
        tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
    {
        throw InvalidArgumentException(descName +
            ": Expected data type which supports per-axis quantization scheme but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
170 
171 //---------------------------------------------------------------
172 void ValidateTensorQuantizationSpace(const TensorInfo& first,
173  const TensorInfo& second,
174  const std::string& descName,
175  std::string const& firstName,
176  std::string const& secondName)
177 {
178  if (!first.IsQuantized() ||
179  !second.IsQuantized())
180  {
181  // Not a quantized type, ignore the validation
182  return;
183  }
184 
185  DataType firstDataType = first.GetDataType();
186  DataType secondDataType = second.GetDataType();
187 
188  if (firstDataType != secondDataType)
189  {
190  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
191  " must be of the same quantized type, " +
192  firstName + " is " + GetDataTypeName(firstDataType) + ", " +
193  secondName + " is " + GetDataTypeName(secondDataType));
194  }
195 
196  if (!first.IsTypeSpaceMatch(second))
197  {
198  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
199  " must have the same quantization space, " +
200  firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
201  " and scale " + to_string(first.GetQuantizationScale()) + ", " +
202  secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
203  " and scale " + to_string(second.GetQuantizationScale()));
204  }
205 }
206 
207 //---------------------------------------------------------------
208 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
209  const TensorInfo& inputTensorInfo,
210  const TensorInfo& weightsTensorInfo,
211  const std::string& descName)
212 {
213  // Helper lambda function to validate a single bias quantization scale value
214  auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
215  {
216  constexpr float tolerance = 0.0001f;
217  if (std::abs(biasScale - expectedScale) > tolerance)
218  {
219  // Print the float values with extra precision to see very small differences
220  ARMNN_LOG(warning) << std::setprecision(6) << descName << ": Expected " << expectedScale <<
221  " for bias quantization scale (product of input and weight scales), but got " <<
222  biasScale << ". Using scale provided.";
223  }
224  };
225 
226  if (biasTensor.GetQuantizationOffset() != 0)
227  {
228  throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
229  to_string(biasTensor.GetQuantizationOffset()));
230  }
231 
232  if (biasTensor.HasMultipleQuantizationScales() || weightsTensorInfo.HasMultipleQuantizationScales())
233  {
234  // Validate per-axis quantization scales
235  const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
236  const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();
237 
238  if (weightScales.size() != biasScales.size())
239  {
240  std::stringstream msg;
241  msg << descName << ": Expected matching number of per-axis quantization scales for weights and bias, "
242  << "but got different values. This is currently unsupported: weights=" << weightScales.size()
243  << ", biases=" << biasScales.size();
244  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
245  }
246 
247  for (size_t i = 0ul; i < biasScales.size(); ++i)
248  {
249  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
250  VerifyBiasQuantizationScale(biasScales[i], expectedScale);
251  }
252  }
253  else
254  {
255  // Validate per-tensor quantization scale
256  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
257  VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
258  }
259 }
260 
261 //---------------------------------------------------------------
262 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
263  unsigned int numExpected,
264  const std::string& descName,
265  const std::string& varName)
266 {
267  if (vec.empty() && numExpected > 0)
268  {
269  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
270  }
271 
272  for (unsigned int i = 0; i < numExpected; ++i)
273  {
274  if (!vec[i])
275  {
276  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
277  }
278  }
279 }
280 
281 //---------------------------------------------------------------
282 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
283  const TensorInfo& second,
284  const TensorInfo& output,
285  std::string const& descName,
286  std::string const& firstName,
287  std::string const& secondName)
288 {
289  // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
290  // broadcasted.
291  if (first.GetNumDimensions() != second.GetNumDimensions())
292  {
293  throw InvalidArgumentException(descName + ": Tensors "
294  + firstName + " & " + secondName
295  + " must have the same number of dimensions in order to be broadcasted");
296  }
297  uint32_t numDims = first.GetNumDimensions();
298  std::vector<uint32_t> outputDims(numDims, 0u);
299  for (uint32_t i = 0; i < numDims; i++)
300  {
301  const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
302  const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
303  if (dimsNotEqual && dimsNotOne)
304  {
305  throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
306  }
307  outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
308  }
309  TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
310  if (broadcastShape != output.GetShape())
311  {
312  throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
313  + firstName + " & " + secondName
314  + " does not match the output shape");
315  }
316 }
317 
318 //---------------------------------------------------------------
319 void ValidateDataTypes(const TensorInfo& info,
320  const std::vector<armnn::DataType>& supportedTypes,
321  std::string const& descName)
322 {
323  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
324  if (iterator == supportedTypes.end())
325  {
326  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
327  }
328 }
329 
330 //---------------------------------------------------------------
331 void ValidateTensorDataTypesMatch(const TensorInfo& first,
332  const TensorInfo& second,
333  std::string const& descName,
334  std::string const& firstName,
335  std::string const& secondName)
336 {
337  if (first.GetDataType() != second.GetDataType())
338  {
339  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
340  " must have identical data types.");
341  }
342 }
343 
344 //---------------------------------------------------------------
345 void ValidateTensorNumElementsMatch(const TensorInfo& first,
346  const TensorInfo& second,
347  std::string const& descName,
348  std::string const& firstName,
349  std::string const& secondName)
350 {
351  if (first.GetNumElements() != second.GetNumElements())
352  {
353  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
354  " must have the same number of elements.");
355  }
356 }
357 
// Validates the weight tensor's data type against the input's: quantized
// 8-bit inputs accept any of the listed 8-bit quantized weight types;
// any other input type must match the weight type exactly.
// NOTE(review): upstream wraps the validTypes list in
// ARMNN_NO_DEPRECATE_WARN_BEGIN/END (for QuantizedSymm8PerAxis); those
// macro lines are folded out of this capture — confirm against the source.
void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        const std::vector<DataType> validTypes =
        {
            DataType::QAsymmS8,
            DataType::QAsymmU8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}
382 
383 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
384  const std::string& descName,
385  const std::string& tensorName)
386 {
387  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
388  if (!quantizationDim.has_value())
389  {
390  throw InvalidArgumentException(fmt::format("{0}: Quantization dimension for per-axis quantization "
391  "not set on tensor {1}.", descName, tensorName));
392  }
393 
394  if (quantizationDim.value() != 0)
395  {
396  throw InvalidArgumentException(fmt::format(
397  "{0}: Quantization dimension for per-axis quantization expected to be 0 on tensor {1}, "
398  "but got: {2}", descName, tensorName, quantizationDim.value()));
399  }
400 }
401 
402 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
403  const std::string& descName,
404  const std::string& tensorName)
405 {
406  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
407  if (quantizationOffset != 0)
408  {
409  throw InvalidArgumentException(fmt::format(
410  "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
411  descName, tensorName, quantizationOffset));
412  }
413 }
414 
// Validates per-axis quantization parameters on a weight tensor (and its
// optional bias): checks are only performed when the weight tensor actually
// carries per-axis parameters. Requires a quantized 8-bit input whose type
// matches the output, weight type/dimension/offset checks, and — when a
// bias is supplied — Signed32 bias with matching per-axis parameters.
void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        // Per-axis weights only make sense for quantized 8-bit data paths
        // where input and output agree on type.
        const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support "
                "per-axis quantization.", descName, "weight"));
        }


        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            // Per-axis weights require per-axis bias parameters too.
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(fmt::format(
                    "{}: Per-axis quantization parameters not set on bias tensor, "
                    "despite being set on weight tensor.", descName));
            }

            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
456 
457 } // anonymous namespace
458 
459 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
460  unsigned int numExpectedIn, unsigned int numExpectedOut) const
461 {
462  ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
463  ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
464 }
465 
466 //---------------------------------------------------------------
467 void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
468 {
469  const std::string descriptorName{"MapQueueDescriptor"};
470 
471  ValidateNumInputs(workloadInfo, descriptorName, 1);
472  ValidateNumOutputs(workloadInfo, descriptorName, 0);
473 
474  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
475  {
476  if (!m_Inputs[i])
477  {
479  fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
480  }
481  }
482 }
483 
484 //---------------------------------------------------------------
485 void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
486 {
487  const std::string descriptorName{"UnmapQueueDescriptor"};
488 
489  ValidateNumInputs(workloadInfo, descriptorName, 1);
490  ValidateNumOutputs(workloadInfo, descriptorName, 0);
491 
492  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
493  {
494  if (!m_Inputs[i])
495  {
497  fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
498  }
499  }
500 }
501 
502 //---------------------------------------------------------------
503 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
504 {
505  const std::string descriptorName{"MemCopyQueueDescriptor"};
506 
507  ValidateNumInputs(workloadInfo, descriptorName, 1);
508  ValidateNumOutputs(workloadInfo, descriptorName , 1);
509 
510  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
511  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
512 
513  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
514  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
515 
516  if (m_Inputs.size() != m_Outputs.size())
517  {
518  throw InvalidArgumentException(fmt::format(
519  "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
520  descriptorName, m_Inputs.size(), m_Outputs.size()));
521  }
522 
523  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
524  {
525  if (!m_Inputs[i])
526  {
527  throw InvalidArgumentException(fmt::format(
528  "{0}: Invalid NULL input {1}.", descriptorName, i));
529  }
530 
531  if (!m_Outputs[i])
532  {
533  throw InvalidArgumentException(fmt::format("{0}: Invalid NULL output {1}", descriptorName, i));
534  }
535  }
536 }
537 
538 //---------------------------------------------------------------
539 void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
540 {
541  ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
542  ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);
543 
544  if (workloadInfo.m_InputTensorInfos.size() != 1)
545  {
546  throw InvalidArgumentException(fmt::format("Number of input infos ({}) is not 1.",
547  workloadInfo.m_InputTensorInfos.size()));
548 
549  }
550 
551  if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
552  {
553  throw InvalidArgumentException(fmt::format(
554  "Number of input infos ({0}) does not match the number of output infos ({1})",
555  workloadInfo.m_InputTensorInfos.size(), workloadInfo.m_OutputTensorInfos.size()));
556  }
557 
558  for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
559  {
560  if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
561  workloadInfo.m_OutputTensorInfos[i].GetNumElements())
562  {
563  throw InvalidArgumentException(fmt::format(
564  "Number of elements for tensor input and output {} does not match", i ));
565  }
566  }
567 
568  if (m_Inputs.size() != 1)
569  {
570  throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
571  }
572 
573  if (m_Inputs.size() != m_Outputs.size())
574  {
575  throw InvalidArgumentException(fmt::format(
576  "Number of inputs ({0}) does not match the number of outputs ({1})",
577  m_Inputs.size(), m_Outputs.size()));
578  }
579 
580  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
581  {
582  if (!m_Inputs[i])
583  {
584  throw InvalidArgumentException(fmt::format("Invalid null input {}", i));
585  }
586 
587  if (!m_Outputs[i])
588  {
589  throw InvalidArgumentException(fmt::format("Invalid null output {}", i));
590  }
591  }
592 }
593 
594 //---------------------------------------------------------------
595 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
596 {
597  ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
598  ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
599 
600  if (m_Inputs.size() != 1)
601  {
602  throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
603  }
604 
605  if (m_Outputs.size() != 0)
606  {
607  throw InvalidArgumentException(fmt::format("Number of outputs ({}) is not 0.", m_Outputs.size()));
608  }
609 
610  if (!m_Inputs[0])
611  {
612  throw InvalidArgumentException(fmt::format("Invalid null input 0"));
613  }
614 }
615 
616 //---------------------------------------------------------------
617 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
618 {
619  const std::string descriptorName{"ActivationQueueDescriptor"};
620 
621  ValidateNumInputs(workloadInfo, descriptorName, 1);
622  ValidateNumOutputs(workloadInfo, descriptorName, 1);
623 
624  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
625  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
626 
627  std::vector<DataType> supportedTypes =
628  {
635  };
636 
637  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
638  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
639  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
640 }
641 
// Validates an ArgMinMax workload: one input, one output of Signed32/64, a
// supported input type, and an output shape equal to the input shape with
// the reduction axis removed (scalar-ish output for 1D input).
void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // ArgMin/ArgMax produce indices, so the output must be an integer type.
    if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
        outputTensorInfo.GetDataType() != DataType::Signed64)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
    }

    // NOTE(review): the initializer list below was folded out of this capture
    // by the documentation generator; as written the vector is empty, so
    // ValidateDataTypes would reject every input. Restore the supported-type
    // list from the upstream source file.
    std::vector<DataType> supportedInputTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    auto inputNumDimensions = inputShape.GetNumDimensions();
    // Normalises a possibly-negative axis into [0, rank).
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        // NOTE(review): this uses && — an output with >1 dims but first dim
        // == 1 passes the check; confirm that is the intended behaviour.
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        // Dimensions before the reduction axis must be unchanged...
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        // ...and dimensions after it shift down by one.
        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
707 
// Validates a Cast workload: one input, one output, a supported input type,
// and matching shapes. Data types deliberately need not match — changing
// the type is the point of Cast.
void CastQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"CastQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the initializer list below was folded out of this capture
    // by the documentation generator; as written the vector is empty, so
    // ValidateDataTypes would reject every input. Restore the supported-type
    // list from the upstream source file.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
734 
// Validates a Softmax workload: one input, one output, a supported data
// type, and matching input/output types and shapes.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the initializer list below was folded out of this capture
    // by the documentation generator; as written the vector is empty, so
    // ValidateDataTypes would reject every input. Restore the supported-type
    // list from the upstream source file.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
759 
// Validates a Splitter workload: one input, at least one output matching the
// input's data type, one split window (view origin) per output, with each
// window having the input's dimensionality and lying inside the input.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    // NOTE(review): the initializer list below was folded out of this capture
    // by the documentation generator; as written the vector is empty, so
    // ValidateDataTypes would reject every output. Restore the supported-type
    // list from the upstream source file.
    std::vector<DataType> supportedTypes =
    {
    };

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the "throw InvalidArgumentException(" line was folded
        // out of this capture, leaving the dangling expression below —
        // restore it from the upstream source file.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_OutputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                "have the same dimensionality as the input tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the input "
                "tensor has " +
                to_string(inputDims) + " dimensions.");
        }
        // Each window, offset by its origin, must fit inside the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
831 
// Validates a Concat workload: at least one input, one output, a valid
// concatenation axis, one merge window (view origin) per input with the
// output's dimensionality and lying inside the output, supported data types,
// and every input matching the output's data type.
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    // Concatenation along the innermost dimension skips the window checks
    // below entirely (early return).
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the "throw InvalidArgumentException(" line was folded
        // out of this capture, leaving the dangling expression below —
        // restore it from the upstream source file.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_InputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                "have the same dimensionality as the output tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the output "
                "tensor has " +
                to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    // NOTE(review): the initializer list below was folded out of this capture
    // by the documentation generator; as written the vector is empty, so
    // ValidateDataTypes would reject every input. Restore the supported-type
    // list from the upstream source file.
    std::vector<DataType> supportedTypes =
    {
    };

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
927 
// Validates a Stack workload: m_NumInputs tensors of identical shape
// (m_InputShape) are stacked along m_Axis into one output tensor of rank
// inputRank + 1. Throws InvalidArgumentException on any violated precondition.
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // The input count is part of the descriptor's parameters, not inferred.
    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape:
    // dimensions before the stack axis are unchanged...
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    // ...the stack axis itself holds one slice per input tensor...
    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    // ...and the remaining output dimensions are the input dimensions shifted up by one.
    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    // NOTE(review): the initializer entries appear to have been lost in
    // extraction (empty braces here) — confirm against the original source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // All inputs must share input 0's data type...
    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    // ...and so must the output.
    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
1023 
1024 void FillQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1025 {
1026  const std::string descriptorName{"FillQueueDescriptor"};
1027 
1028  ValidateNumInputs(workloadInfo, descriptorName, 1);
1029  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1030 
1031  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1032  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1033 
1034  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1, "input");
1035 
1036  std::vector<DataType> supportedTypes =
1037  {
1042  };
1043 
1044  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1045 }
1046 
// NOTE(review): the signature line appears to have been lost in extraction;
// from the descriptor name below this is
// FullyConnectedQueueDescriptor::Validate(const WorkloadInfo&) const.
{
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    // Weights (and bias, when enabled) arrive as extra workload inputs only
    // when they are not constant tensors held by the descriptor itself.
    uint32_t numInputs = 1;
    if (!m_Parameters.m_ConstantWeights)
    {
        numInputs = 2;
        if (m_Parameters.m_BiasEnabled)
        {
            numInputs = 3;
        }
    }
    ValidateNumInputs(workloadInfo, descriptorName, numInputs);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
    }

    // Weight info comes either from the descriptor (constant) or from input 1.
    TensorInfo weightTensorInfo;
    if (m_Parameters.m_ConstantWeights)
    {
        ValidatePointer(m_Weight, descriptorName, "weight");
        weightTensorInfo = m_Weight->GetTensorInfo();
    }
    else
    {
        weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
    }
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        // Bias likewise comes from the descriptor or from input 2.
        TensorInfo biasTensorInfo;
        if (m_Parameters.m_ConstantWeights)
        {
            ValidatePointer(m_Bias, descriptorName, "bias");
            biasTensorInfo = m_Bias->GetTensorInfo();
        }
        else
        {
            biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
        }
        // Validates type and quantization values.
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    // Check the supported data types
    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
    if (inputTensorInfo.GetDataType() == DataType::BFloat16)
    {
        if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
        {
            throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
                                           "for BFloat16 input.");
        }
    }
    else
    {
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    }
}
1130 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is NormalizationQueueDescriptor::Validate.
{
    const std::string descriptorName{"NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Check the supported data types
    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // Normalization is shape- and type-preserving: output must mirror the input.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1158 
1159 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1160 {
1161  const std::string descriptorName{"AdditionQueueDescriptor"};
1162 
1163  ValidateNumInputs(workloadInfo, descriptorName, 2);
1164  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1165 
1166  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1167  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1168  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1169 
1170  std::vector<DataType> supportedTypes =
1171  {
1179  };
1180 
1181  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1182  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1183  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1184 
1185  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1186  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1187 
1188  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1189  inputTensorInfo1,
1190  outputTensorInfo,
1191  descriptorName,
1192  "input_0",
1193  "input_1");
1194 }
1195 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is MultiplicationQueueDescriptor::Validate.
{
    const std::string descriptorName{"MultiplicationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Both inputs and the output must share one data type (checked transitively).
    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    // Input shapes need only broadcast together to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
1232 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is BatchNormalizationQueueDescriptor::Validate.
{
    const std::string descriptorName{"BatchNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Shape- and type-preserving operation.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // The four statistic/parameter tensors are owned by the descriptor and
    // must all be present...
    ValidatePointer(m_Mean, descriptorName, "mean");
    ValidatePointer(m_Variance, descriptorName, "variance");
    ValidatePointer(m_Beta, descriptorName, "beta");
    ValidatePointer(m_Gamma, descriptorName, "gamma");

    const TensorInfo& mean = m_Mean->GetTensorInfo();
    const TensorInfo& variance = m_Variance->GetTensorInfo();
    const TensorInfo& beta = m_Beta->GetTensorInfo();
    const TensorInfo& gamma = m_Gamma->GetTensorInfo();

    // ...one-dimensional...
    ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
    ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
    ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
    ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");

    // ...and all of identical shape.
    ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
    ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
    ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
}
1278 
1280 {
1281  const std::string descriptorName{"Convolution2dQueueDescriptor"};
1282 
1283  ValidateNumInputs(workloadInfo, descriptorName, 1);
1284  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1285 
1286  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1287  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1288 
1289  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1290  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1291 
1292  ValidatePointer(m_Weight, descriptorName, "weight");
1293 
1294  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1295  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1296 
1297  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1298 
1299  Optional<TensorInfo> optionalBiasTensorInfo;
1300  if (m_Parameters.m_BiasEnabled)
1301  {
1302  ValidatePointer(m_Bias, descriptorName, "bias");
1303 
1304  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1305  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1306 
1307  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1308  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1309  }
1310 
1311  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1312  {
1314  fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1315  "cannot be either negative or 0.",
1316  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1317  }
1318 
1319  ValidatePerAxisQuantization(inputTensorInfo,
1320  outputTensorInfo,
1321  weightTensorInfo,
1322  optionalBiasTensorInfo,
1323  descriptorName);
1324 
1325  std::vector<DataType> supportedTypes =
1326  {
1334  };
1335 
1336  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1337 
1338  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1339  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1340  {
1341  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1342  {
1343  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1344  "for BFloat16 input.");
1345  }
1346  }
1347  else
1348  {
1349  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1350  }
1351 }
1352 
1354 {
1355  const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1356 
1357  ValidateNumInputs(workloadInfo, descriptorName, 1);
1358  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1359 
1360  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1361  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1362 
1363  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1364  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1365 
1366  ValidatePointer(m_Weight, descriptorName, "weight");
1367 
1368  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1369  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1370 
1371  if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1372  {
1374  fmt::format("{}: dilationX (provided {}) and dilationY (provided {}) "
1375  "cannot be smaller than 1.",
1376  descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationX));
1377  }
1378 
1379  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1380  {
1382  fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1383  "cannot be either negative or 0.",
1384  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1385  }
1386 
1387  const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1388 
1389  // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1390  // inputChannels * channelMultiplier should be equal to outputChannels.
1391  const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1392  const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
1393  const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1394  if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1395  {
1396  throw InvalidArgumentException(fmt::format(
1397  "{0}: output_channels (provided {1}) should be equal to input_channels (provided {2}) "
1398  "multiplied by channel_multiplier (provided {3}).",
1399  descriptorName, numWeightOutputChannels, numWeightInputChannels, numWeightChannelMultiplier));
1400  }
1401 
1402  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1403 
1404  Optional<TensorInfo> optionalBiasTensorInfo;
1405  if (m_Parameters.m_BiasEnabled)
1406  {
1407  ValidatePointer(m_Bias, descriptorName, "bias");
1408 
1409  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1410  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1411 
1412  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1413  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1414  }
1415  ValidatePerAxisQuantization(inputTensorInfo,
1416  outputTensorInfo,
1417  weightTensorInfo,
1418  optionalBiasTensorInfo,
1419  descriptorName);
1420 
1421  std::vector<DataType> supportedTypes =
1422  {
1429  };
1430 
1431  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1432  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1433 }
1434 
1435 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1436 {
1437  const std::string descriptorName{"PermuteQueueDescriptor"};
1438 
1439  ValidateNumInputs(workloadInfo, descriptorName, 1);
1440  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1441 
1442  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1443 
1444  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1445  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1446 
1447  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1448  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1449 
1450  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1451  {
1452  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1453  {
1454  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1455  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1456  "must match dst dimension " + to_string(mapping[i]) +
1457  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1458  }
1459  }
1460 
1461  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1462 }
1463 
1464 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1465 {
1466  const std::string descriptorName{"Pooling2dQueueDescriptor"};
1467 
1468  ValidateNumInputs(workloadInfo, descriptorName, 1);
1469  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1470 
1471  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1472  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1473 
1474  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1475  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1476 
1477  std::vector<DataType> supportedTypes =
1478  {
1485  };
1486 
1487  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1488  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1489 }
1490 
1492 {
1493  const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1494 
1495  ValidateNumInputs(workloadInfo, descriptorName, 1);
1496  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1497 
1498  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1499  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1500 
1501  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1502  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1503 
1504  std::vector<DataType> supportedTypes =
1505  {
1512  };
1513 
1514  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1515  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1516 
1517  // ResizeBilinear only changes width and height: batch and channel count must match.
1518  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1519  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1520  if (inputBatchSize != outputBatchSize)
1521  {
1523  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1524  descriptorName, inputBatchSize, outputBatchSize));
1525  }
1526 
1527  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1528  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1529  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1530  if (inputChannelCount != outputChannelCount)
1531  {
1533  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1534  descriptorName, inputChannelCount, outputChannelCount));
1535  }
1536 }
1537 
1538 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1539 {
1540  const std::string descriptorName{"ResizeQueueDescriptor"};
1541 
1542  ValidateNumInputs(workloadInfo, descriptorName, 1);
1543  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1544 
1545  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1546  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1547 
1548  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1549  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1550 
1551  std::vector<DataType> supportedTypes =
1552  {
1559  };
1560 
1561  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1562  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1563 
1564  // Resize only changes width and height: batch and channel count must match.
1565  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1566  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1567  if (inputBatchSize != outputBatchSize)
1568  {
1570  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1571  descriptorName, inputBatchSize, outputBatchSize));
1572  }
1573 
1574  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1575  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1576  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1577  if (inputChannelCount != outputChannelCount)
1578  {
1580  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1581  descriptorName, inputChannelCount, outputChannelCount));
1582  }
1583 }
1584 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is FakeQuantizationQueueDescriptor::Validate.
{
    const std::string descriptorName{"FakeQuantizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Only 2-D tensors are accepted, and the shape must be preserved.
    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // The quantization interval must be well-formed.
    if (m_Parameters.m_Min > m_Parameters.m_Max)
    {
        throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
    }
}
1605 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is InstanceNormalizationQueueDescriptor::Validate.
{
    const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Ranks above 4 are rejected outright.
    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Shape- and type-preserving operation.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1634 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is L2NormalizationQueueDescriptor::Validate.
{
    const std::string descriptorName{"L2NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Ranks above 4 are rejected outright.
    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Shape- and type-preserving operation.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1666 
1667 void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1668 {
1669  const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1670 
1671  ValidateNumInputs(workloadInfo, descriptorName, 1);
1672  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1673 
1674  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1675  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1676 
1677  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1678 
1679  std::vector<DataType> supportedTypes =
1680  {
1684  };
1685 
1686  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1687  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1688 }
1689 
1690 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1691 {
1692  const std::string descriptorName{"ConstantQueueDescriptor"};
1693 
1694  ValidateNumInputs(workloadInfo, descriptorName, 0);
1695  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1696 
1697  if (!m_LayerOutput)
1698  {
1699  throw InvalidArgumentException(descriptorName + ": No const input specified.");
1700  }
1701 
1702  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1703  ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1704 
1705  // Check the supported data types
1706  std::vector<DataType> supportedTypes =
1707  {
1716  };
1717 
1718  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1719 }
1720 
1721 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1722 {
1723  const std::string descriptorName{"ReshapeQueueDescriptor"};
1724 
1725  ValidateNumInputs(workloadInfo, descriptorName, 1);
1726  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1727 
1728  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1729  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1730 
1731  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1732 
1733  // Check the supported data types
1734  std::vector<DataType> supportedTypes =
1735  {
1744  };
1745 
1746  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1747  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1748 }
1749 
// NOTE(review): the signature line appears lost in extraction; from the
// descriptor name below this is SpaceToBatchNdQueueDescriptor::Validate.
{
    const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Exactly two spatial dimensions (height, width) are supported.
    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
    }

    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
                                       "dimensions as Block Shape.");
    }

    const TensorShape& inputShape = inputTensorInfo.GetShape();

    // Pad list entries are {before, after} pairs, ordered height then width.
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);

    // Spatial extents after padding is applied.
    const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
                                    widthPad.first + widthPad.second;
    const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
                                     heightPad.first + heightPad.second;

    // The space->batch rearrangement must preserve the padded element count.
    const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
                                          inputShape[dimensionIndices.GetChannelsIndex()];
    const unsigned int numOutputElements = outputTensorInfo.GetNumElements();

    if (numOutputElements != numInputElements)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(numOutputElements) + " elements.");
    }

    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
                                       "divisible by Block Shape in all spatial dimensions");
    }

    // NOTE(review): initializer entries appear lost in extraction (empty braces).
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1816 
{
    // Validates a SpaceToDepth workload: exactly one 4D input and one 4D
    // output, matching element counts, a non-zero block size, spatial
    // dimensions divisible by the block size, and an output depth divisible
    // by the square of the block size.
    const std::string descriptorName{"SpaceToDepthQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // SpaceToDepth is defined on 4D tensors only.
    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Data types accepted for both input and output.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // The op only rearranges data, so input and output element counts must agree.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_BlockSize == 0)
    {
        throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
    }

    // Resolve H/W/C positions for the configured data layout (NCHW vs NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int wIndex = dimensionIndices.GetWidthIndex();
    const unsigned int hIndex = dimensionIndices.GetHeightIndex();
    const unsigned int cIndex = dimensionIndices.GetChannelsIndex();

    const TensorShape& inputShape = inputTensorInfo.GetShape();
    if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
                                       "by block size in all spatial dimensions");
    }

    // Each blockSize x blockSize spatial patch is folded into the channel
    // dimension, so the output depth must be a multiple of blockSize^2.
    const TensorShape& outputShape = outputTensorInfo.GetShape();
    if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
    {
        throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
                                       "must be divisible by the square of block size." );
    }
}
1869 
void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates a Floor workload: one input of a supported type and one
    // output whose TensorInfo is identical to the input's.
    const std::string descriptorName{"FloorQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the input tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // Floor is element-wise and type-preserving: the whole TensorInfo
    // (shape, type, quantization) must match exactly, not just the shape.
    if (inputTensorInfo != outputTensorInfo)
    {
        throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
    }
}
1895 
1896 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1897 {
1898  // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1899 
1900  const std::string descriptorName{"LstmQueueDescriptor"};
1901 
1902  // check dimensions of all inputs and outputs
1903  if (workloadInfo.m_InputTensorInfos.size() != 3)
1904  {
1905  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1906  }
1907  if (workloadInfo.m_OutputTensorInfos.size() != 4)
1908  {
1909  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1910  }
1911 
1912  std::vector<DataType> supportedTypes =
1913  {
1918  };
1919 
1920  // check for supported type of one input and match them with all the other input and output
1921  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1922 
1923  // type matches all other inputs
1924  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1925  {
1926  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1927  workloadInfo.m_InputTensorInfos[i],
1928  descriptorName,
1929  "input_0",
1930  "input_" + std::to_string(i));
1931  }
1932  // type matches all other outputs
1933  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1934  {
1935  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1936  workloadInfo.m_OutputTensorInfos[i],
1937  "LstmQueueDescriptor",
1938  "input_0",
1939  "output_" + std::to_string(i));
1940  }
1941 
1942  // Making sure clipping parameters have valid values.
1943  // == 0 means no clipping
1944  // > 0 means clipping
1945  if (m_Parameters.m_ClippingThresCell < 0.0f)
1946  {
1947  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
1948  }
1949  if (m_Parameters.m_ClippingThresProj < 0.0f)
1950  {
1951  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
1952  }
1953 
1954 
1955  // Inferring batch size, number of outputs and number of cells from the inputs.
1956  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1957  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1958  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1959  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1960  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1961  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1962 
1963  // input tensor
1964  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1965  descriptorName + " input_0");
1966  // outputStateInTensor
1967  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1968  descriptorName + " input_1");
1969  // outputStateInTensor
1970  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1971  descriptorName + " input_2");
1972  // scratchBufferTensor
1973  unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1974  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1975  descriptorName + " output_0");
1976  // outputStateOutTensor
1977  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1978  descriptorName + " output_1");
1979  // cellStateOutTensor
1980  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1981  descriptorName + " output_2");
1982  // outputTensor
1983  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1984  descriptorName + " output_3");
1985 
1986 
1987  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1988  if ( m_InputToInputWeights )
1989  {
1990  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1991  (n_cell * n_input), "InputLayerNormWeights");
1992  }
1993 
1994  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1995  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1996  (n_cell * n_input), "InputToForgetWeights");
1997 
1998  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1999  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
2000  (n_cell * n_input), "InputToCellWeights");
2001 
2002  if ( m_RecurrentToInputWeights )
2003  {
2004  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
2005  (n_cell * n_output), "RecurrentToInputWeights");
2006  }
2007 
2008  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
2009  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
2010  (n_cell * n_output), "RecurrentToForgetWeights");
2011 
2012  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
2013  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
2014  (n_cell * n_output), "RecurrentToCellWeights");
2015 
2016  // Make sure the input-gate's parameters are either both present (regular
2017  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
2018  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
2019  !m_Parameters.m_CifgEnabled) ||
2020  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
2021  m_Parameters.m_CifgEnabled));
2022  if (!cifg_weights_all_or_none)
2023  {
2024  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
2025  "RecurrentToInputWeights must either both be present (regular LSTM) "
2026  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
2027  "accordingly.");
2028  }
2029 
2030  if ( m_CellToInputWeights )
2031  {
2032  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
2033  n_cell, "CellToInputWeights");
2034  }
2035  if ( m_CellToForgetWeights )
2036  {
2037  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
2038  n_cell, "CellToForgetWeights");
2039  }
2040  if ( m_CellToOutputWeights )
2041  {
2042  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
2043  n_cell, "CellToOutputWeights");
2044  }
2045 
2046  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
2047  bool peephole_weights_all_or_none =
2048  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
2049  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
2050  || ( !m_CellToInputWeights && !m_CellToForgetWeights
2051  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
2052  if (!peephole_weights_all_or_none)
2053  {
2054  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
2055  }
2056 
2057  // Make sure the input gate bias is present only when not a CIFG-LSTM.
2058  if (m_Parameters.m_CifgEnabled)
2059  {
2060  if (m_InputGateBias)
2061  {
2062  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
2063  }
2064  }
2065  else
2066  {
2067  if (!m_InputGateBias)
2068  {
2069  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
2070  "must be present.");
2071  }
2072  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
2073  n_cell, "InputGateBias");
2074  }
2075 
2076  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
2077  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
2078 
2079  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
2080  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
2081 
2082  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
2083  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
2084 
2085  if (m_ProjectionWeights)
2086  {
2087  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
2088  (n_cell * n_output), "ProjectionWeights");
2089  }
2090  if (m_ProjectionBias)
2091  {
2092  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
2093  }
2094 
2095  // Making sure the projection tensors are consistent:
2096  // 1) If projection weight is not present, then projection bias should not be
2097  // present.
2098  // 2) If projection weight is present, then projection bias is optional.
2099  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2100  !m_Parameters.m_ProjectionEnabled)
2101  || (m_ProjectionWeights && !m_ProjectionBias &&
2102  m_Parameters.m_ProjectionEnabled)
2103  || (m_ProjectionWeights && m_ProjectionBias &&
2104  m_Parameters.m_ProjectionEnabled));
2105  if (!projecton_tensors_consistent)
2106  {
2107  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
2108  }
2109 
2110  // The four layer normalization weights either all have values or none of them have values. Additionally, if
2111  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
2112  // either all have values or none of them have values. Layer normalization is used when the values of all the
2113  // layer normalization weights are present
2114  if (m_InputLayerNormWeights)
2115  {
2116  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
2117  }
2118  if (m_ForgetLayerNormWeights)
2119  {
2120  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2121  }
2122  if (m_CellLayerNormWeights)
2123  {
2124  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2125  }
2126  if (m_OutputLayerNormWeights)
2127  {
2128  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2129  }
2130 
2131  if (m_Parameters.m_LayerNormEnabled)
2132  {
2133  if (!m_Parameters.m_CifgEnabled)
2134  {
2135  if (!m_InputLayerNormWeights)
2136  {
2137  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
2138  "disabled but InputLayerNormWeights are not present");
2139  }
2140  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2141  1, n_cell, "InputLayerNormWeights");
2142  }
2143  else if (m_InputLayerNormWeights)
2144  {
2145  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
2146  "enabled");
2147  }
2148 
2149  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2150  "ForgetLayerNormWeights");
2151  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2152 
2153  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2154  "OutputLayerNormWeights");
2155  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2156 
2157  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2158  "CellLayerNormWeights");
2159  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2160  }
2161  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2162  {
2163  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2164  "normalisation weights are present.");
2165  }
2166 }
2167 
2169 {
2170  const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
2171 
2172  ValidateNumInputs(workloadInfo, descriptorName, 1);
2173  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2174 
2175  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2176  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2177 
2178  if (inputTensorInfo.GetDataType() != DataType::BFloat16)
2179  {
2180  throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
2181  }
2182 
2183  if (outputTensorInfo.GetDataType() != DataType::Float32)
2184  {
2185  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2186  }
2187 
2188  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2189 }
2190 
2192 {
2193  const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
2194 
2195  ValidateNumInputs(workloadInfo, descriptorName, 1);
2196  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2197 
2198  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2199  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2200 
2201  if (inputTensorInfo.GetDataType() != DataType::Float32)
2202  {
2203  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2204  }
2205 
2206  if (outputTensorInfo.GetDataType() != DataType::BFloat16)
2207  {
2208  throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
2209  }
2210 
2211  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2212 }
2213 
2215 {
2216  const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
2217 
2218  ValidateNumInputs(workloadInfo, descriptorName, 1);
2219  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2220 
2221  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2222  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2223 
2224  if (inputTensorInfo.GetDataType() != DataType::Float32)
2225  {
2226  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2227  }
2228 
2229  if (outputTensorInfo.GetDataType() != DataType::Float16)
2230  {
2231  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
2232  }
2233 
2234  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2235 }
2236 
2238 {
2239  const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
2240 
2241  ValidateNumInputs(workloadInfo, descriptorName, 1);
2242  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2243 
2244  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2245  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2246 
2247  if (inputTensorInfo.GetDataType() != DataType::Float16)
2248  {
2249  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
2250  }
2251 
2252  if (outputTensorInfo.GetDataType() != DataType::Float32)
2253  {
2254  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2255  }
2256 
2257  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2258 }
2259 
void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates an element-wise Division workload: two inputs of supported
    // types whose shapes must be broadcast-compatible with the single output.
    const std::string descriptorName{"DivisionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the inputs and the output.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Checks that the two input shapes broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2293 
{
    // Validates an element-wise Subtraction workload: two inputs of supported
    // types whose shapes must be broadcast-compatible with the single output.
    const std::string descriptorName{"SubtractionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the inputs and the output.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Checks that the two input shapes broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2327 
void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates an element-wise Maximum workload: two inputs of supported
    // types whose shapes must be broadcast-compatible with the single output.
    const std::string descriptorName{"MaximumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the inputs and the output.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Checks that the two input shapes broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2361 
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates a Mean (reduction) workload: one input, one output, and an
    // output rank consistent with the reduced axes and the m_KeepDims flag.
    const std::string descriptorName{"MeanQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the input tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    // First check if input tensor data type is supported, then
    // check if this data type matches the output tensor data type
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_KeepDims)
    {
        // Reduced axes are retained as size-1 dimensions, so rank is unchanged.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        // No axes supplied: everything is reduced to a single dimension.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
    }
    else
    {
        // Each reduced axis removes one dimension, but the result never drops
        // below rank 1.
        unsigned int outputDim =
            inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(outputTensorInfo,
                                    descriptorName,
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
2405 
2406 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2407 {
2408  const std::string descriptorName{"PadQueueDescriptor"};
2409 
2410  ValidateNumInputs(workloadInfo, descriptorName, 1);
2411  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2412 
2413  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2414  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2415 
2416  // input and output should have the same number of dimensions
2417  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2418 
2419  // there should be entry in the pad list for each dimension in the input tensor
2420  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2421  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2422  "as there are dimensions in the input tensor that is " +
2423  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2424  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2425  }
2426 }
2427 
void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates a Quantize workload: one input of a supported type and one
    // output whose data type must be a quantized type.
    const std::string descriptorName{"QuantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the input tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // The output carries the quantized representation, so its type must
    // satisfy IsQuantizedType().
    if (!IsQuantizedType(outputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
    }
}
2456 
{
    // Validates a BatchToSpaceNd workload: one input and one output of a
    // supported, matching data type.
    const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the input tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2480 
{
    // Validates a StridedSlice workload: one input (rank <= 4), one output,
    // matching data types and quantization, begin/end/stride vectors whose
    // lengths equal the input rank, and strictly non-zero strides.
    const std::string descriptorName{"StridedSliceQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the input tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Slicing must not re-scale values: quantization parameters must match.
    ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    const uint32_t rank = inputTensorInfo.GetNumDimensions();
    if (rank > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Begin, End & Stride length must be of rank(input0)
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
    }

    // Stride entries must be non-zero (a zero stride would never advance).
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
        }
    }
}
2537 
void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates an element-wise Minimum workload: two inputs of supported
    // types whose shapes must be broadcast-compatible with the single output.
    const std::string descriptorName{"MinimumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the inputs and the output.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Checks that the two input shapes broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2571 
2572 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2573 {
2574  const std::string descriptorName{"DebugQueueDescriptor"};
2575 
2576  ValidateNumInputs(workloadInfo, descriptorName, 1);
2577  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2578 }
2579 
2580 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2581 {
2582  const std::string descriptorName{"EqualQueueDescriptor"};
2583 
2584  ValidateNumInputs(workloadInfo, descriptorName, 2);
2585  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2586 
2587  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2588  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2589  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2590 
2591  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2592  inputTensorInfo1,
2593  outputTensorInfo,
2594  descriptorName,
2595  "input_0",
2596  "input_1");
2597 
2598  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2599  {
2600  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2601  }
2602 }
2603 
2604 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2605 {
2606  const std::string descriptorName{"GreaterQueueDescriptor"};
2607 
2608  ValidateNumInputs(workloadInfo, descriptorName, 2);
2609  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2610 
2611  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2612  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2613  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2614 
2615  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2616  inputTensorInfo1,
2617  outputTensorInfo,
2618  descriptorName,
2619  "input_0",
2620  "input_1");
2621 
2622  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2623  {
2624  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2625  }
2626 }
2627 
void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates an element-wise Rsqrt workload: one input and one output with
    // identical shapes and matching, supported data types.
    const std::string descriptorName{"RsqrtQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Rsqrt is element-wise, so shapes must match exactly.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Data types accepted for the input tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2653 
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // Validates a Gather workload: a params input plus an Int32 indices
    // input, and an output whose rank is rank(params) + rank(indices) - 1.
    const std::string descriptorName{"GatherQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // The second input holds the gather indices and must be Signed32.
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    if (indicesTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
    }

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Data types accepted for the params tensor.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Gathering replaces one axis of the params tensor with the full shape of
    // the indices tensor, hence the expected output rank below.
    unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
2688 
// NOTE(review): the function signature line was lost by the listing extraction
// (it was a hyperlink). Judging from 'descriptorName' below, this is
// DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo&) const.
// It checks the TFLite-style detection post-processing workload: two inputs
// (box encodings, scores), a constant anchors tensor, and exactly four outputs.
2690 {
2691  const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2692 
2693  ValidateNumInputs(workloadInfo, descriptorName, 2);
2694 
     // Outputs are fixed: boxes, classes, scores, and the number of detections.
2695  if (workloadInfo.m_OutputTensorInfos.size() != 4)
2696  {
2697  throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2698  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2699  }
2700 
     // Anchors are supplied as a constant tensor handle on the descriptor itself.
2701  if (m_Anchors == nullptr)
2702  {
2703  throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2704  }
2705 
2706  const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
2707  const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2708  const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2709 
2710  const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
2711  const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2712  const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2713  const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
2714 
2715  ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2716  ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2717  ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2718 
     // NOTE(review): the entries of 'supportedInputTypes' were dropped by the
     // listing extraction — see the original WorkloadData.cpp for the full list.
2719  const std::vector<DataType> supportedInputTypes =
2720  {
2727  };
2728 
2729  ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2730  ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2731  ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2732 
2733  ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2734  ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2735  ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2736  ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2737 
2738  // NOTE: Output is always Float32 regardless of input type
2739  ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2740  ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2741  ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2742  ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
2743 
     // IoU threshold must lie in (0, 1].
2744  if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2745  {
2746  throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2747  "must be positive and less than or equal to 1.")
2748  }
2749 
     // The scores tensor's last dimension includes one extra background class.
2750  if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2751  {
2752  throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2753  "should be equal to number of classes + 1.");
2754  }
2755 }
2756 
// Validates a Dequantize workload: a single quantized input is converted to a
// single (non-quantized) output.
2757 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2758 {
     // NOTE(review): binds a const reference to a temporary std::string
     // (lifetime-extended, so legal) — sibling validators use a plain
     // std::string by value instead.
2759  const std::string& descriptorName{"DequantizeQueueDescriptor"};
2760 
2761  ValidateNumInputs(workloadInfo, descriptorName, 1);
2762  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2763 
2764  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2765  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2766 
     // Dequantize only makes sense on a quantized input type.
2767  if (!IsQuantizedType(inputTensorInfo.GetDataType()))
2768  {
2769  throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
2770  }
2771 
     // NOTE(review): the entries of 'supportedTypes' (the allowed OUTPUT types)
     // were dropped by the listing extraction — see the original file.
2772  std::vector<DataType> supportedTypes =
2773  {
2777  };
2778 
2779  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2780 }
2781 
2782 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2783 {
2784  const std::string& descriptorName{"MergeQueueDescriptor"};
2785 
2786  ValidateNumInputs(workloadInfo, descriptorName, 2);
2787  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2788 
2789  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2790  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2791  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2792 
2793  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2794  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2795 
2796  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2797  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2798 }
2799 
// Validates a Switch workload: two inputs and two outputs; both outputs must
// have the same shape as the first input, and every tensor must use one of the
// supported data types.
2800 void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2801 {
     // NOTE(review): const reference bound to a temporary std::string
     // (lifetime-extended, legal) — by-value would be simpler.
2802  const std::string& descriptorName{"SwitchQueueDescriptor"};
2803 
2804  ValidateNumInputs(workloadInfo, descriptorName, 2);
2805  ValidateNumOutputs(workloadInfo, descriptorName, 2);
2806 
2807  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2808  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2809 
2810  const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2811  const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2812 
     // NOTE(review): the entries of 'supportedTypes' were dropped by the
     // listing extraction — see the original WorkloadData.cpp.
2813  std::vector<DataType> supportedTypes =
2814  {
2820  };
2821 
2822  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2823  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2824 
2825  ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2826  ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2827 
     // Both outputs mirror the shape of input 0.
2828  ValidateTensorShapesMatch(inputTensorInfo0,
2829  outputTensorInfo0,
2830  descriptorName,
2831  "input_0",
2832  "output_0");
2833 
2834  ValidateTensorShapesMatch(inputTensorInfo0,
2835  outputTensorInfo1,
2836  descriptorName,
2837  "input_0",
2838  "output_1");
2839 }
2840 
2841 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
2842 {
2843  // This is internally generated so it should not need validation.
2844 }
2845 
// Validates a PReLU workload: input and alpha tensors (broadcast-compatible)
// and one output, all sharing the same data type.
// NOTE(review): the entries of 'supportedTypes' were dropped by the listing
// extraction — see the original WorkloadData.cpp for the full list.
2846 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2847 {
2848  const std::string& descriptorName{"PreluQueueDescriptor"};
2849 
2850  ValidateNumInputs(workloadInfo, descriptorName, 2);
2851  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2852 
2853  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2854  const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2855  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2856 
2857  std::vector<DataType> supportedTypes
2858  {
2865  };
2866 
2867  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2868  ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2869 
2870  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2871 
2872  ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
     // Fixed typo in the error-message tensor label: "ouptut" -> "output".
2873  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2874 
     // Alpha may broadcast against the input; the output takes the broadcast shape.
2875  ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2876  alphaTensorInfo,
2877  outputTensorInfo,
2878  descriptorName,
2879  "input",
2880  "alpha");
2881 }
2882 
// NOTE(review): the function signature line was lost by the listing extraction
// (it was a hyperlink). From 'descriptorName' below this is
// TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo&) const.
// It validates a transpose (deconvolution) 2D workload: 4D input/output/weight
// tensors, optional bias, and per-axis quantization consistency.
2884 {
2885  const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2886 
2887  ValidateNumInputs(workloadInfo, descriptorName, 1);
2888  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2889 
2890  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2891  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2892 
2893  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2894  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2895 
     // Weights are mandatory and supplied as a constant tensor handle.
2896  ValidatePointer(m_Weight, descriptorName, "weight");
2897 
2898  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2899  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2900 
2901  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2902 
     // Bias is only validated when the descriptor enables it.
2903  Optional<TensorInfo> optionalBiasTensorInfo;
2904  if (m_Parameters.m_BiasEnabled)
2905  {
2906  ValidatePointer(m_Bias, descriptorName, "bias");
2907 
2908  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2909  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
2910 
     // Bias type is derived from the input type (e.g. Signed32 for quantized inputs).
2911  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
2912  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2913  }
2914 
2915  ValidatePerAxisQuantization(inputTensorInfo,
2916  outputTensorInfo,
2917  weightTensorInfo,
2918  optionalBiasTensorInfo,
2919  descriptorName);
2920 
     // NOTE(review): the entries of 'supportedTypes' were dropped by the
     // listing extraction — see the original WorkloadData.cpp.
2921  std::vector<DataType> supportedTypes =
2922  {
2929  };
2930 
2931  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2932  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2933 }
2934 
2935 void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2936 {
2937  const std::string descriptorName{"TransposeQueueDescriptor"};
2938 
2939  ValidateNumInputs(workloadInfo, descriptorName, 1);
2940  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2941 
2942  const PermutationVector& mapping = m_Parameters.m_DimMappings;
2943 
2944  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2945  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2946 
2947  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
2948  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
2949 
2950  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
2951  {
2952  if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
2953  {
2954  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
2955  " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
2956  "must match dst dimension " + to_string(i) +
2957  " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
2958  }
2959  }
2960 
2961  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2962 }
2963 
2964 void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2965 {
2966  const std::string descriptorName{"QLstmQueueDescriptor"};
2967 
2968  // Validate number of inputs/outputs
2969  ValidateNumInputs(workloadInfo, descriptorName, 3);
2970  ValidateNumOutputs(workloadInfo, descriptorName, 3);
2971 
2972  // Input/output tensor info
2973  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2974  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
2975  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];
2976 
2977  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2978  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2979  auto outputInfo = workloadInfo.m_OutputTensorInfos[2];
2980 
2981  // Supported types for various tensors in QLSTM
2982  std::vector<DataType> inputOutputSupportedTypes =
2983  {
2985  };
2986 
2987  std::vector<DataType> cellStateSupportedTypes =
2988  {
2990  };
2991 
2992  std::vector<DataType> weightsSupportedTypes =
2993  {
2995  };
2996 
2997  std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
2998  {
3000  };
3001 
3002  std::vector<DataType> biasSupportedTypes =
3003  {
3005  };
3006 
3007  // Validate types of input/output tensors
3008  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3009  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3010  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3011 
3012  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3013  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3014  ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);
3015 
3016  // Validate matching types of input/output tensors
3017  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3018  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3019  "outputStateIn", "outputStateOut");
3020  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3021 
3022  // Infer number of batches, number of units, input size and output size from tensor dimensions
3023  const uint32_t numBatches = inputInfo.GetShape()[0];
3024  const uint32_t inputSize = inputInfo.GetShape()[1];
3025  const uint32_t outputSize = outputStateInInfo.GetShape()[1];
3026  const uint32_t numUnits = cellStateInInfo.GetShape()[1];
3027 
3028  // Validate number of dimensions and number of elements for input/output tensors
3029  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3030  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3031  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");
3032 
3033  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3034  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
3035  ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");
3036 
3037  // Validate number of dimensions and number of elements for MANDATORY weight tensors
3038  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3039  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3040  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");
3041 
3042  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3043  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3044  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");
3045 
3046  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3047  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3048  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");
3049 
3050  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3051  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3052  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
3053  " RecurrentToForgetWeights");
3054 
3055  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3056  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3057  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");
3058 
3059  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3060  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3061  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");
3062 
3063  // Validate data types for MANDATORY weights tensors (all should match each other)
3064  ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);
3065 
3066  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
3067  "inputToForgetWeights", "inputToCellWeights");
3068  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3069  "inputToForgetWeights", "inputToOutputWeights");
3070 
3071  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3072  "inputToForgetWeights", "recurrentToForgeteights");
3073  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3074  "inputToForgetWeights", "recurrentToCellWeights");
3075  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3076  "inputToForgetWeights", "recurrentToOutputWeights");
3077 
3078  // Validate number of dimensions and number of elements for MANDATORY bias tensors
3079  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3080  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3081  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");
3082 
3083  ValidatePointer(m_CellBias, descriptorName, "CellBias");
3084  auto cellBiasInfo = m_CellBias->GetTensorInfo();
3085  ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");
3086 
3087  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3088  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3089  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");
3090 
3091  // Validate data types for MANDATORY bias tensors
3092  ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);
3093 
3094  ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
3095  "forgetGateBias", "cellBias");
3096  ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
3097  "forgetGateBias", "outputGateBias");
3098 
3099  // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
3100  const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
3101  !m_Parameters.m_CifgEnabled) ||
3102  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3103  !m_InputGateBias && m_Parameters.m_CifgEnabled));
3104 
3105  if (!allCifgParamsPresentOrNot)
3106  {
3107  throw InvalidArgumentException(descriptorName +
3108  ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
3109  "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
3110  "set appropriately.");
3111  }
3112 
3113  if (!m_Parameters.m_CifgEnabled)
3114  {
3115  // Validate number of dimensions and number of elements
3116  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3117  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");
3118 
3119  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3120  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
3121  " RecurrentToInputWeights");
3122 
3123  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3124  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");
3125 
3126  // Validate data types
3127  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
3128  "inputToForgetWeights", "inputToInputWeights");
3129  ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3130  "inputToForgetWeights", "recurrentToInputWeights");
3131  ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
3132  "forgetGateBias", "inputGateBias");
3133  }
3134 
3135  // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
3136  bool allPeepholeWeightsPresentOrNot =
3137  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3138  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3139  || (!m_CellToInputWeights && !m_CellToForgetWeights
3140  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3141 
3142  if (!allPeepholeWeightsPresentOrNot)
3143  {
3144  throw InvalidArgumentException(descriptorName +
3145  ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
3146  "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
3147  "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
3148  "appropriately.");
3149  }
3150 
3151  if (m_Parameters.m_PeepholeEnabled)
3152  {
3153  auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
3154  ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
3155  ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3156 
3157  auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
3158  ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
3159  ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
3160  "cellToForgetWeight", "cellToOutputWeights");
3161 
3162  if (!m_Parameters.m_CifgEnabled)
3163  {
3164  auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
3165  ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
3166  ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
3167  "cellToForgetWeights", "cellToInputWeights");
3168  }
3169  }
3170 
3171  // Validate OPTIONAL params: Layer Norm Weights
3172  bool allLayerNormWeightsPresentOrNot =
3173  (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
3174  && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
3175  || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
3176  && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));
3177 
3178  if (!allLayerNormWeightsPresentOrNot)
3179  {
3180  throw InvalidArgumentException(descriptorName +
3181  ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights "
3182  "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
3183  "be present at all (Layer Norm disabled). InputLayerNormWeights should "
3184  "only be present when Layer Norm is enabled and CIFG is disabled. "
3185  "m_Parameters.m_LayerNormEnabled should be set appropriately.");
3186  }
3187 
3188  if (m_Parameters.m_LayerNormEnabled)
3189  {
3190  auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
3191  ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
3192  ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3193 
3194  auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
3195  ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
3196  ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
3197  "forgetLayerNormWeights", "cellLayerNormWeights");
3198 
3199  auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
3200  ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
3201  ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
3202  "forgetLayerNormWeights", "outputLayerNormWeights");
3203 
3204  if (!m_Parameters.m_CifgEnabled)
3205  {
3206  auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
3207  ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
3208  ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
3209  "forgetLayerNormWeights", "inputLayerNormWeights");
3210  }
3211  }
3212 
3213  // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
3214  bool correctProjectionTensorsPresent =
3215  ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
3216  (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
3217  (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));
3218 
3219  if (!correctProjectionTensorsPresent)
3220  {
3221  throw InvalidArgumentException(descriptorName +
3222  ": If projection is enabled, ProjectionWeights should be present and "
3223  "ProjectionBias is optional. If projection is disabled, neither "
3224  "ProjectionWeights nor ProjectionBias should be present.");
3225  }
3226 
3227  if (m_Parameters.m_ProjectionEnabled)
3228  {
3229  auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
3230  ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
3231  ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);
3232 
3233  if (m_ProjectionBias)
3234  {
3235  auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
3236  ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
3237  ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
3238  }
3239 
3240  }
3241  else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
3242  outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
3243  throw InvalidArgumentException(descriptorName +
3244  ": If projection is disabled, output quantization info (scale, offset) "
3245  "should match HiddenStateScale and HiddenStateZeroPoint.");
3246  }
3247 
3248 }
3249 
3251 {
3252  const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
3253 
3254  // Validate number of inputs/outputs
3255  ValidateNumInputs(workloadInfo, descriptorName, 3);
3256  ValidateNumOutputs(workloadInfo, descriptorName, 2);
3257 
3258  // Input/output tensor infos
3259  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3260  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
3261  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
3262 
3263  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3264  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3265 
3266  std::vector<DataType> inputOutputSupportedTypes =
3267  {
3269  };
3270 
3271  std::vector<DataType> cellStateSupportedTypes =
3272  {
3274  };
3275 
3276  std::vector<DataType> weightsSupportedTypes =
3277  {
3279  };
3280 
3281  std::vector<DataType> biasSupportedTypes =
3282  {
3284  };
3285 
3286  // Validate types of input/output tensors
3287  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3288  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3289  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3290 
3291  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3292  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3293 
3294  // Validate matching types of input/output tensors
3295  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3296  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3297  "outputStateIn", "outputStateOut");
3298  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3299 
3300  // Validate matching quantization info for input/output tensors
3301  ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3302  ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
3303  ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3304 
3305  // Infer number of batches, input size and output size from tensor dimensions
3306  const uint32_t numBatches = inputInfo.GetShape()[0];
3307  const uint32_t inputSize = inputInfo.GetShape()[1];
3308  const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3309 
3310  // Validate number of dimensions and number of elements for input/output tensors
3311  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3312  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
3313  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3314  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
3315  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3316 
3317  // Validate number of dimensions and number of elements for weights tensors
3318  ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
3319  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3320  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
3321 
3322  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3323  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3324  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
3325 
3326  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3327  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3328  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
3329 
3330  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3331  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3332  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
3333 
3334  ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
3335  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3336  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
3337 
3338  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3339  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3340  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3341  " RecurrentToForgetWeights");
3342 
3343  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3344  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3345  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3346 
3347  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3348  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3349  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3350 
3351  // Validate data types for weights tensors (all should match each other)
3352  ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3353 
3354  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3355  "inputToInputWeights", "inputToForgetWeights");
3356  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3357  "inputToInputWeights", "inputToCellWeights");
3358  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3359  "inputToInputWeights", "inputToOutputWeights");
3360 
3361  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3362  "inputToInputWeights", "recurrentToInputWeights");
3363  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3364  "inputToInputWeights", "recurrentToForgeteights");
3365  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3366  "inputToInputWeights", "recurrentToCellWeights");
3367  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3368  "inputToInputWeights", "recurrentToOutputWeights");
3369 
3370  // Validate matching quantization info for weight tensors (all should match each other)
3371  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3372  descriptorName, "inputToInputWeights", "inputToForgetWeights");
3373  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3374  descriptorName, "inputToInputWeights", "inputToCellWeights");
3375  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3376  descriptorName, "inputToInputWeights", "inputToOutputWeights");
3377 
3378  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3379  descriptorName, "inputToInputWeights", "recurrentToInputWeights");
3380  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3381  descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
3382  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3383  descriptorName, "inputToInputWeights", "recurrentToCellWeights");
3384  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3385  descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
3386 
3387  // Validate number of dimensions and number of elements in bias tensors
3388  ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
3389  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3390  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
3391 
3392  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3393  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3394  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
3395 
3396  ValidatePointer(m_CellBias, descriptorName, "CellBias");
3397  auto cellBiasInfo = m_CellBias->GetTensorInfo();
3398  ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
3399 
3400  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3401  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3402  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
3403 
3404  // Validate data types for bias tensors (all should match each other)
3405  ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3406 
3407  ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3408  "inputGateBias", "forgetGateBias");
3409  ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3410  "inputGateBias", "cellBias");
3411  ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3412  "inputGateBias", "outputGateBias");
3413 
3414  // Validate bias tensor quantization info
3415  ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3416  ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3417  ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3418  ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3419 }
3420 
3421 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3422 {
3423  const std::string descriptorName{"AbsQueueDescriptor"};
3424 
3425  ValidateNumInputs(workloadInfo, descriptorName, 1);
3426  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3427 
3428  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3429  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3430 
3431  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3432 
3433  std::vector<DataType> supportedTypes =
3434  {
3442  };
3443 
3444  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3445  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3446 }
3447 
3448 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3449 {
3450  const std::string descriptorName{"SliceQueueDescriptor"};
3451 
3452  ValidateNumInputs(workloadInfo, descriptorName, 1);
3453  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3454 
3455  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3456  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3457 
3458  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3459 
3460  const unsigned int rank = inputTensorInfo.GetNumDimensions();
3461  if (rank > 4)
3462  {
3463  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3464  }
3465 
3466  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3467 
3468  // Check if m_Begin and m_Size have the expected length
3469  if (m_Parameters.m_Begin.size() != rank)
3470  {
3471  throw InvalidArgumentException(descriptorName +
3472  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3473  }
3474  if (m_Parameters.m_Size.size() != rank)
3475  {
3476  throw InvalidArgumentException(descriptorName +
3477  ": Length of size descriptor must equal rank " + std::to_string(rank));
3478  }
3479 
3480  // Check if the shape of the output tensor matches m_Size
3481  const TensorShape& outputShape = outputTensorInfo.GetShape();
3482  for (unsigned int i = 0u; i < rank; ++i)
3483  {
3484  if (m_Parameters.m_Size[i] != outputShape[i])
3485  {
3486  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3487  }
3488  }
3489 
3490  // Check if the sum of begin offset and size in a given dimension
3491  // does not exceed the size of corresponding input
3492  const TensorShape& inputShape = inputTensorInfo.GetShape();
3493  for(unsigned int i = 0u; i < rank; ++i)
3494  {
3495  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3496  {
3497  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3498  std::to_string(i) + " exceeds input size.");
3499  }
3500  }
3501 }
3502 
3504 {
3505  const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3506 
3507  ValidateNumInputs(workloadInfo, descriptorName, 1);
3508  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3509 
3510  const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3511  const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3512 
3513  ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3514  ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3515 
3516  std::vector<DataType> supportedTypes =
3517  {
3524  };
3525 
3526  ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3527  ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3528 
3529  ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3530 
3531  if (m_Parameters.m_BlockSize == 0)
3532  {
3533  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3534  }
3535 
3536  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3537  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3538  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3539  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3540 
3541  const TensorShape& outputShape = outputInfo.GetShape();
3542  if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3543  {
3544  throw InvalidArgumentException(descriptorName + ": Output width and height shape"
3545  "must be divisible by block size.");
3546  }
3547 
3548  const TensorShape& inputShape = inputInfo.GetShape();
3549  if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3550  {
3551  throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
3552  "must be divisible by the square of block size." );
3553  }
3554 }
3555 
3556 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3557 {
3558  const std::string descriptorName{"ComparisonQueueDescriptor"};
3559 
3560  ValidateNumInputs(workloadInfo, descriptorName, 2);
3561  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3562 
3563  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3564  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3565  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3566 
3567  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3568  inputTensorInfo1,
3569  outputTensorInfo,
3570  descriptorName,
3571  "input_0",
3572  "input_1");
3573 
3574  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3575  {
3576  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3577  }
3578 }
3579 
3581 {
3582  const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3583 
3584  ValidateNumInputs(workloadInfo, descriptorName, 1);
3585  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3586 
3587  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3588  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3589 
3590  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3591 
3592  std::vector<DataType> supportedTypes =
3593  {
3601  };
3602 
3603  std::vector<DataType> logicalSupportedTypes =
3604  {
3606  };
3607 
3608  if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
3609  {
3610  ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
3611  }
3612  else
3613  {
3614  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3615  }
3616 
3617 
3618  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3619 }
3620 
3621 void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3622 {
3623  const std::string descriptorName{"RankQueueDescriptor"};
3624 
3625  ValidateNumInputs(workloadInfo, descriptorName, 1);
3626  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3627 
3628  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3629  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3630 
3631  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
3632  ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
3633 
3634  std::vector<DataType> supportedTypes =
3635  {
3644  };
3645 
3646  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3647  ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
3648 }
3649 
3651 {
3652  const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
3653 
3654  ValidateNumInputs(workloadInfo, descriptorName, 2);
3655  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3656 
3657  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3658  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3659  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3660 
3661  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3662  inputTensorInfo1,
3663  outputTensorInfo,
3664  descriptorName,
3665  "input_0",
3666  "input_1");
3667 
3668  if (inputTensorInfo0.GetDataType() != DataType::Boolean)
3669  {
3670  throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
3671  }
3672 
3673  if (inputTensorInfo1.GetDataType() != DataType::Boolean)
3674  {
3675  throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
3676  }
3677 
3678  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3679  {
3680  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3681  }
3682 }
3683 
3684 void ReduceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3685 {
3686  const std::string descriptorName{"ReduceQueueDescriptor"};
3687 
3688  ValidateNumInputs(workloadInfo, descriptorName, 1);
3689  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3690 
3691  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3692  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3693 
3694  std::vector<DataType> supportedTypes =
3695  {
3703  };
3704 
3705  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3706  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3707 }
3708 
3709 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:423
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:260
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:437
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:485
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2021 ARM Limited and Contributors.
void Validate(const WorkloadInfo &workloadInfo) const
SizeType GetSize() const
Definition: Types.hpp:274
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:442
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:197
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:191
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:265
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:36
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
float GetQuantizationScale() const
Definition: Tensor.cpp:452
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
Definition: Tensor.hpp:194
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about inputs and outputs to a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191
unsigned int GetChannelsIndex() const
bool IsQuantized() const
Definition: Tensor.cpp:495
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:192
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin