ArmNN
 20.05
WorkloadData.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
10 
11 #include <algorithm>
12 #include <iomanip>
13 #include <string>
14 #include <sstream>
15 
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
18 
19 using namespace armnnUtils;
20 
21 namespace armnn
22 {
23 
24 //---------------------------------------------------------------
26 {
27  switch (inputDataType)
28  {
29  case DataType::Float16:
30  return DataType::Float16;
31  case DataType::BFloat16:
32  case DataType::Float32:
33  return DataType::Float32;
34  case DataType::QAsymmS8:
35  return DataType::Signed32;
36  case DataType::QAsymmU8:
37  return DataType::Signed32;
38  case DataType::QSymmS8:
39  return DataType::Signed32;
40  case DataType::QSymmS16:
41  return DataType::Signed32;
42  default:
43  ARMNN_ASSERT_MSG(false, "Invalid input data type");
44  return DataType::Float32;
45  }
46 }
47 
48 namespace
49 {
50 
//---------------------------------------------------------------
// android ndk does not support std::to_string function.
// Streams any value type into its textual representation.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
60 
61 //---------------------------------------------------------------
62 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
63 {
64  if (!ptr)
65  {
66  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
67  paramName + " parameter must be set.");
68  }
69 }
70 
71 //---------------------------------------------------------------
72 void ValidateTensorShapesMatch(const TensorInfo& first,
73  const TensorInfo& second,
74  std::string const& descName,
75  std::string const& firstName,
76  std::string const& secondName)
77 {
78  if (first.GetShape() != second.GetShape())
79  {
80  throw InvalidArgumentException(descName + ": "
81  + firstName + " & " + secondName + " must have identical shapes");
82  }
83 }
84 
85 //---------------------------------------------------------------
86 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
87 {
88  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
89  {
90  throw InvalidArgumentException(descName +
91  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
92  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
93  }
94 }
95 
96 //---------------------------------------------------------------
97 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
98 {
99  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
100  {
101  throw InvalidArgumentException(descName +
102  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
103  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
104  }
105 }
106 
107 //---------------------------------------------------------------
108 void ValidateTensorNumDimensions(const TensorInfo& tensor,
109  std::string const& descName,
110  unsigned int numDimensions,
111  std::string const& tensorName)
112 {
113  if (tensor.GetNumDimensions() != numDimensions)
114  {
115  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
116  to_string(tensor.GetNumDimensions()) + " dimensions for " +
117  tensorName + " tensor.");
118  }
119 }
120 
121 //---------------------------------------------------------------
122 void ValidateTensorNumElements(const TensorInfo& tensor,
123  std::string const& descName,
124  unsigned int numElements,
125  std::string const& tensorName)
126 {
127  if (tensor.GetNumElements() != numElements)
128  {
129  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
130  to_string(tensor.GetNumElements()) + " elements for " +
131  tensorName + " tensor.");
132  }
133 }
134 
135 //---------------------------------------------------------------
136 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
137  unsigned int numDimension,
138  unsigned int numElements,
139  std::string const& tensorName)
140 {
141  const std::string functionName{"ValidateTensorNumDimNumElem"};
142  ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
143  ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
144 }
145 
146 //---------------------------------------------------------------
147 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
148  const std::string& descName, std::string const& tensorName)
149 {
150  if (tensor.GetDataType() != dataType)
151  {
152  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
153  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
154  }
155 }
156 
157 void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
158 {
160  if (tensor.GetDataType() != DataType::QSymmS8 &&
161  tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
162  {
163  throw InvalidArgumentException(descName +
164  ": Expected data type which supports per-axis quantization scheme but got " +
165  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
166  }
168 }
169 
170 //---------------------------------------------------------------
171 void ValidateTensorQuantizationSpace(const TensorInfo& first,
172  const TensorInfo& second,
173  const std::string& descName,
174  std::string const& firstName,
175  std::string const& secondName)
176 {
177  if (!first.IsQuantized() ||
178  !second.IsQuantized())
179  {
180  // Not a quantized type, ignore the validation
181  return;
182  }
183 
184  DataType firstDataType = first.GetDataType();
185  DataType secondDataType = second.GetDataType();
186 
187  if (firstDataType != secondDataType)
188  {
189  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
190  " must be of the same quantized type, " +
191  firstName + " is " + GetDataTypeName(firstDataType) + ", " +
192  secondName + " is " + GetDataTypeName(secondDataType));
193  }
194 
195  if (!first.IsTypeSpaceMatch(second))
196  {
197  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
198  " must have the same quantization space, " +
199  firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
200  " and scale " + to_string(first.GetQuantizationScale()) + ", " +
201  secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
202  " and scale " + to_string(second.GetQuantizationScale()));
203  }
204 }
205 
206 //---------------------------------------------------------------
207 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
208  const TensorInfo& inputTensorInfo,
209  const TensorInfo& weightsTensorInfo,
210  const std::string& descName)
211 {
212  // Helper lambda function to validate a single bias quantization scale value
213  auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
214  {
215  constexpr float tolerance = 0.000001f;
216  if (std::abs(biasScale - expectedScale) > tolerance)
217  {
218  // Print the float values with extra precision to see very small differences
219  std::stringstream msg;
220  msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
221  " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
222  biasScale;
223  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
224  }
225  };
226 
227  if (biasTensor.GetQuantizationOffset() != 0)
228  {
229  throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
230  to_string(biasTensor.GetQuantizationOffset()));
231  }
232 
233  if (biasTensor.HasMultipleQuantizationScales())
234  {
235  // Validate per-axis quantization scales
236  const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
237  const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();
238 
239  if (weightScales.size() != biasScales.size())
240  {
241  std::stringstream msg;
242  msg << descName << ": Expected matchhing number of per-axis quantization scales, but got different "
243  << "values: weights=" << weightScales.size() << ", biases=" << biasScales.size();
244  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
245  }
246 
247  for (size_t i = 0ul; i < biasScales.size(); ++i)
248  {
249  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
250  VerifyBiasQuantizationScale(biasScales[i], expectedScale);
251  }
252  }
253  else
254  {
255  // Validate per-tensor quantization scale
256  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
257  VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
258  }
259 }
260 
261 //---------------------------------------------------------------
262 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
263  unsigned int numExpected,
264  const std::string& descName,
265  const std::string& varName)
266 {
267  if (vec.empty() && numExpected > 0)
268  {
269  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
270  }
271 
272  for (unsigned int i = 0; i < numExpected; ++i)
273  {
274  if (!vec[i])
275  {
276  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
277  }
278  }
279 }
280 
281 //---------------------------------------------------------------
282 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
283  const TensorInfo& second,
284  const TensorInfo& output,
285  std::string const& descName,
286  std::string const& firstName,
287  std::string const& secondName)
288 {
289  // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
290  // broadcasted.
291  if (first.GetNumDimensions() != second.GetNumDimensions())
292  {
293  throw InvalidArgumentException(descName + ": Tensors "
294  + firstName + " & " + secondName
295  + " must have the same number of dimensions in order to be broadcasted");
296  }
297  uint32_t numDims = first.GetNumDimensions();
298  std::vector<uint32_t> outputDims(numDims, 0u);
299  for (uint32_t i = 0; i < numDims; i++)
300  {
301  const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
302  const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
303  if (dimsNotEqual && dimsNotOne)
304  {
305  throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
306  }
307  outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
308  }
309  TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
310  if (broadcastShape != output.GetShape())
311  {
312  throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
313  + firstName + " & " + secondName
314  + " does not match the output shape");
315  }
316 }
317 
318 //---------------------------------------------------------------
319 void ValidateDataTypes(const TensorInfo& info,
320  const std::vector<armnn::DataType>& supportedTypes,
321  std::string const& descName)
322 {
323  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
324  if (iterator == supportedTypes.end())
325  {
326  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
327  }
328 }
329 
330 //---------------------------------------------------------------
331 void ValidateTensorDataTypesMatch(const TensorInfo& first,
332  const TensorInfo& second,
333  std::string const& descName,
334  std::string const& firstName,
335  std::string const& secondName)
336 {
337  if (first.GetDataType() != second.GetDataType())
338  {
339  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
340  " must have identical data types.");
341  }
342 }
343 
344 //---------------------------------------------------------------
345 void ValidateTensorNumElementsMatch(const TensorInfo& first,
346  const TensorInfo& second,
347  std::string const& descName,
348  std::string const& firstName,
349  std::string const& secondName)
350 {
351  if (first.GetNumElements() != second.GetNumElements())
352  {
353  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
354  " must have the same number of elements.");
355  }
356 }
357 
358 void ValidateWeightDataType(const TensorInfo& inputInfo,
359  const TensorInfo& weightInfo,
360  const std::string& descName)
361 {
362  const DataType inputType = inputInfo.GetDataType();
363  if (IsQuantized8BitType(inputType))
364  {
366  const std::vector<DataType> validTypes =
367  {
368  DataType::QAsymmS8,
369  DataType::QAsymmU8,
370  DataType::QSymmS8,
371  DataType::QuantizedSymm8PerAxis // deprecated
372  };
374 
375  ValidateDataTypes(weightInfo, validTypes, descName);
376  }
377  else
378  {
379  ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
380  }
381 }
382 
383 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
384  const std::string& descName,
385  const std::string& tensorName)
386 {
387  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
388  if (!quantizationDim.has_value())
389  {
390  throw InvalidArgumentException(boost::str(
391  boost::format("%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
392  % descName % tensorName));
393  }
394 
395  if (quantizationDim.value() != 0)
396  {
397  throw InvalidArgumentException(boost::str(
398  boost::format("%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, "
399  "but got: %3%") % descName % tensorName % quantizationDim.value()));
400  }
401 }
402 
403 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
404  const std::string& descName,
405  const std::string& tensorName)
406 {
407  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
408  if (quantizationOffset != 0)
409  {
410  throw InvalidArgumentException(boost::str(
411  boost::format("%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, "
412  "but got: %3%") % descName % tensorName % quantizationOffset));
413  }
414 }
415 
416 void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
417  const TensorInfo& outputInfo,
418  const TensorInfo& weightInfo,
419  const Optional<TensorInfo>& optionalBiasInfo,
420  const std::string& descName)
421 {
422  if (weightInfo.HasPerAxisQuantization())
423  {
424  const DataType inputDataType = inputInfo.GetDataType();
425  const DataType outputDataType = outputInfo.GetDataType();
426 
427  const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;
428 
429  if (!canHavePerAxisQuantization)
430  {
431  throw InvalidArgumentException(boost::str(
432  boost::format("%1%: Per-axis quantization parameters set on tensor %2%, "
433  "but data type does not support per-axis quantization.") % descName % "weight"));
434  }
435 
436 
437  ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
438  ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
439  ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");
440 
441  if (optionalBiasInfo.has_value())
442  {
443  const TensorInfo& biasInfo = optionalBiasInfo.value();
444  if (!biasInfo.HasPerAxisQuantization())
445  {
446  throw InvalidArgumentException(boost::str(
447  boost::format("%1%: Per-axis quantization parameters not set on bias tensor, despite being set on "
448  "weight tensor.") % descName));
449  }
450 
451  ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
452  ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
453  ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
454  }
455  }
456 }
457 
458 } // anonymous namespace
459 
460 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
461  unsigned int numExpectedIn, unsigned int numExpectedOut) const
462 {
463  ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
464  ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
465 }
466 
467 //---------------------------------------------------------------
468 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
469 {
470  const std::string descriptorName{"MemCopyQueueDescriptor"};
471 
472  ValidateNumInputs(workloadInfo, descriptorName, 1);
473  ValidateNumOutputs(workloadInfo, descriptorName , 1);
474 
475  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
476  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
477 
478  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
479  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
480 
481  if (m_Inputs.size() != m_Outputs.size())
482  {
483  throw InvalidArgumentException(boost::str(
484  boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
485  descriptorName % m_Inputs.size() % m_Outputs.size()));
486  }
487 
488  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
489  {
490  if (!m_Inputs[i])
491  {
492  throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
493  descriptorName % i));
494  }
495 
496  if (!m_Outputs[i])
497  {
498  throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
499  descriptorName % i));
500  }
501  }
502 }
503 
504 //---------------------------------------------------------------
505 void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
506 {
507  ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
508  ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);
509 
510  if (workloadInfo.m_InputTensorInfos.size() != 1)
511  {
512  throw InvalidArgumentException(boost::str(
513  boost::format("Number of input infos (%1%) is not 1.")
514  % workloadInfo.m_InputTensorInfos.size()));
515 
516  }
517 
518  if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
519  {
520  throw InvalidArgumentException(boost::str(
521  boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
522  % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
523  }
524 
525  for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
526  {
527  if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
528  workloadInfo.m_OutputTensorInfos[i].GetNumElements())
529  {
530  throw InvalidArgumentException(boost::str(
531  boost::format("Number of elements for tensor input and output %1% does not match")
532  % i ));
533  }
534  }
535 
536  if (m_Inputs.size() != 1)
537  {
538  throw InvalidArgumentException(boost::str(
539  boost::format("Number of inputs (%1%) is not 1.")
540  % m_Inputs.size()));
541  }
542 
543  if (m_Inputs.size() != m_Outputs.size())
544  {
545  throw InvalidArgumentException(boost::str(
546  boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
547  % m_Inputs.size() % m_Outputs.size()));
548  }
549 
550  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
551  {
552  if (!m_Inputs[i])
553  {
554  throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
555  }
556 
557  if (!m_Outputs[i])
558  {
559  throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
560  }
561  }
562 }
563 
564 //---------------------------------------------------------------
565 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
566 {
567  ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
568  ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
569 
570  if (m_Inputs.size() != 1)
571  {
572  throw InvalidArgumentException(boost::str(
573  boost::format("Number of inputs (%1%) is not 1.")
574  % m_Inputs.size()));
575  }
576 
577  if (m_Outputs.size() != 0)
578  {
579  throw InvalidArgumentException(boost::str(
580  boost::format("Number of outputs (%1%) is not 0.")
581  % m_Inputs.size() % m_Outputs.size()));
582  }
583 
584  if (!m_Inputs[0])
585  {
586  throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
587  }
588 }
589 
590 //---------------------------------------------------------------
591 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
592 {
593  const std::string descriptorName{"ActivationQueueDescriptor"};
594 
595  ValidateNumInputs(workloadInfo, descriptorName, 1);
596  ValidateNumOutputs(workloadInfo, descriptorName, 1);
597 
598  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
599  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
600 
601  std::vector<DataType> supportedTypes =
602  {
609  };
610 
611  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
612  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
613  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
614 }
615 
616 void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
617 {
618  const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
619 
620  ValidateNumInputs(workloadInfo, descriptorName, 1);
621  ValidateNumOutputs(workloadInfo, descriptorName, 1);
622 
623  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
624  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
625 
626  if (outputTensorInfo.GetDataType() != DataType::Signed32)
627  {
628  throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32.");
629  }
630 
631  std::vector<DataType> supportedInputTypes =
632  {
640  };
641 
642  ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
643 
644  auto inputShape = inputTensorInfo.GetShape();
645  auto outputShape = outputTensorInfo.GetShape();
646 
647  auto inputNumDimensions = inputShape.GetNumDimensions();
648  auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);
649 
650  const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};
651 
652  // 1D input shape results in scalar output shape
653  if (inputShape.GetNumDimensions() == 1)
654  {
655  if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
656  {
657  throw InvalidArgumentException(descriptorName + outputShapeError);
658  }
659  }
660  else
661  {
662  for (unsigned int i = 0; i < unsignedAxis; ++i)
663  {
664  if (outputShape[i] != inputShape[i])
665  {
666  throw InvalidArgumentException(descriptorName + outputShapeError);
667  }
668  }
669 
670  for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
671  {
672  if (outputShape[i - 1] != inputShape[i])
673  {
674  throw InvalidArgumentException(descriptorName + outputShapeError);
675  }
676  }
677  }
678 }
679 
680 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
681 {
682  const std::string descriptorName{"SoftmaxQueueDescriptor"};
683 
684  ValidateNumInputs(workloadInfo, descriptorName, 1);
685  ValidateNumOutputs(workloadInfo, descriptorName, 1);
686 
687  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
688  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
689 
690  std::vector<DataType> supportedTypes =
691  {
698  };
699 
700  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
701  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
702  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
703 }
704 
705 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
706 {
707  const std::string descriptorName{"SplitterQueueDescriptor"};
708 
709  ValidateNumInputs(workloadInfo, descriptorName, 1);
710 
711  // Check the supported data types
712  std::vector<DataType> supportedTypes =
713  {
722  };
723 
724  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
725  for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
726  {
727  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
728  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
729 
730  const std::string outputName = "output_" + std::to_string(i);
731  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
732  }
733 
734  if (workloadInfo.m_OutputTensorInfos.size() <= 0)
735  {
736  throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
737  }
738 
739  if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
740  {
742  descriptorName + ": Number of split windows "
743  "has to match number of workloadInfo.m_OutputTensorInfos. "
744  "Number of windows: " +
745  to_string(m_ViewOrigins.size()) +
746  ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
747  }
748 
749  //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
750  std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
751  for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
752  {
753  //Checks that the dimensionality of input is same as the split windows.
754  ViewOrigin const& e = m_ViewOrigins[w];
755  if (e.m_Origin.size() != inputDims)
756  {
757  throw InvalidArgumentException(descriptorName + ": Window origin have to "
758  "have the same dimensionality as the input tensor. "
759  "Window origin (index: " +
760  to_string(w) + ") has " + to_string(e.m_Origin.size()) +
761  " dimensions, the input "
762  "tensor has " +
763  to_string(inputDims) + " dimensions.");
764  }
765  for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
766  {
767  if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
768  workloadInfo.m_InputTensorInfos[0].GetShape()[i])
769  {
770  throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
771  "be smaller or equal than the size of the input in that coord.");
772  }
773  }
774  }
775 }
776 
777 void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
778 {
779  const std::string descriptorName{"ConcatQueueDescriptor"};
780 
781  ValidateNumOutputs(workloadInfo, descriptorName, 1);
782 
783  if (m_Inputs.size() <= 0)
784  {
785  throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
786  }
787  if (m_Outputs.size() <= 0)
788  {
789  throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
790  }
791 
792  if (workloadInfo.m_InputTensorInfos.size() <= 0)
793  {
794  throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
795  }
796  if (workloadInfo.m_OutputTensorInfos.size() <= 0)
797  {
798  throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
799  }
800 
801  if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
802  {
803  throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
804  }
805 
806  if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
807  {
808  return;
809  }
810 
811  if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
812  {
814  descriptorName + ": Number of split windows "
815  "has to match number of workloadInfo.m_InputTensorInfos. "
816  "Number of windows: " +
817  to_string(m_ViewOrigins.size()) +
818  ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
819  }
820 
821  //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
822  std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
823  for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
824  {
825  //Checks that the dimensionality of output is same as the split windows.
826  ViewOrigin const& e = m_ViewOrigins[w];
827  if (e.m_Origin.size() != outputDims)
828  {
829  throw InvalidArgumentException(descriptorName + ": Window origin have to "
830  "have the same dimensionality as the output tensor. "
831  "Window origin (index: " +
832  to_string(w) + ") has " + to_string(e.m_Origin.size()) +
833  " dimensions, the output "
834  "tensor has " +
835  to_string(outputDims) + " dimensions.");
836  }
837  //Checks that the merge windows are within the output tensor.
838  for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
839  {
840  if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
841  > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
842  {
843  throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
844  "be smaller or equal than the size of the output in that coord.");
845  }
846  }
847  }
848 
849  // Check the supported data types
850  std::vector<DataType> supportedTypes =
851  {
860  };
861 
862  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
863  for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
864  {
865  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
866  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
867 
868  const std::string inputName = "input_" + std::to_string(i);
869  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
870  }
871 }
872 
// Validates a Stack workload: a variable number of inputs, all with the shape
// given in the descriptor parameters, stacked into one output whose shape is
// the input shape with an extra dimension of size m_NumInputs inserted at m_Axis.
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // Stack takes a variable number of inputs, so the count is checked against
    // the descriptor parameter rather than with ValidateNumInputs.
    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    // Dimensions before the stacking axis are unchanged.
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    // The dimension inserted at the stacking axis must equal the number of inputs.
    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    // Dimensions after the stacking axis are shifted up by one relative to the input.
    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // All inputs and the output must share the data type of input 0.
    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
968 
// FullyConnectedQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Output of a fully-connected layer is always 2-D: [batch, outputUnits].
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    // Input may be 2-D, or 4-D (NOTE(review): presumably a feature map to be flattened — confirm).
    if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
    }

    // Weights are a mandatory constant input.
    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        // Validates type and quantization values.
        const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);

        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
    if (inputTensorInfo.GetDataType() == DataType::BFloat16)
    {
        if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
        {
            throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
                                           "for BFloat16 input.");
        }
    }
    else
    {
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    }
}
1030 
// NormalizationQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // Normalization is element-wise: type and shape must be preserved.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1058 
// Validates an element-wise Addition workload: two inputs, one output, all of
// the same data type, with input shapes broadcast-compatible with the output.
void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"AdditionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // input_0 == input_1 and input_1 == output transitively covers input_0 == output.
    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
1094 
// MultiplicationQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
// Validates an element-wise Multiplication workload: two inputs, one output,
// all of the same data type, with broadcast-compatible shapes.
{
    const std::string descriptorName{"MultiplicationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // input_0 == input_1 and input_1 == output transitively covers input_0 == output.
    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
1130 
// BatchNormalizationQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"BatchNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Batch normalization is element-wise: type and shape must be preserved.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // The four statistics/parameter tensors are mandatory constant inputs.
    ValidatePointer(m_Mean, descriptorName, "mean");
    ValidatePointer(m_Variance, descriptorName, "variance");
    ValidatePointer(m_Beta, descriptorName, "beta");
    ValidatePointer(m_Gamma, descriptorName, "gamma");

    const TensorInfo& mean = m_Mean->GetTensorInfo();
    const TensorInfo& variance = m_Variance->GetTensorInfo();
    const TensorInfo& beta = m_Beta->GetTensorInfo();
    const TensorInfo& gamma = m_Gamma->GetTensorInfo();

    // All four must be 1-D and share the same shape.
    ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
    ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
    ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
    ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");

    ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
    ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
    ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
}
1176 
// Convolution2dQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"Convolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // 2-D convolution works on batched 4-D tensors only.
    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Weights are a mandatory constant input.
    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    // Bias is optional; when enabled its type and quantization must be derived
    // from the input and weight tensors.
    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
    if (inputTensorInfo.GetDataType() == DataType::BFloat16)
    {
        if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
        {
            throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
                                           "for BFloat16 input.");
        }
    }
    else
    {
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    }
}
1242 
// DepthwiseConvolution2dQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
    {
        // NOTE(review): the `throw InvalidArgumentException(` wrapper line is elided in this extract.
        // NOTE(review): m_DilationX is passed twice below; the last format argument should
        // presumably be m_DilationY (%3% reports the wrong value) — confirm and fix upstream.
            boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
                                     "cannot be smaller than 1.") % descriptorName %
                                     m_Parameters.m_DilationX % m_Parameters.m_DilationX));
    }

    const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;

    // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
    // inputChannels * channelMultiplier should be equal to outputChannels.
    const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
    const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
    const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        // NOTE(review): the `throw InvalidArgumentException(` wrapper line is elided in this extract.
            boost::str(boost::format("%1%: output_channels (provided %2%) should be "
                                     "equal to input_channels (provided %3%) multiplied by channel_multiplier "
                                     "(provided %4%).") % descriptorName % numWeightOutputChannels %
                                     numWeightInputChannels % numWeightChannelMultiplier));
    }

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    // Bias is optional; when enabled its type and quantization must be derived
    // from the input and weight tensors.
    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
    }
    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1317 
1318 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1319 {
1320  const std::string descriptorName{"PermuteQueueDescriptor"};
1321 
1322  ValidateNumInputs(workloadInfo, descriptorName, 1);
1323  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1324 
1325  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1326 
1327  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1328  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1329 
1330  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1331  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1332 
1333  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1334  {
1335  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1336  {
1337  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1338  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1339  "must match dst dimension " + to_string(mapping[i]) +
1340  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1341  }
1342  }
1343 
1344  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1345 }
1346 
// Validates a 2-D Pooling workload: one 4-D input, one 4-D output, with a
// supported and matching data type.
void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"Pooling2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1373 
// ResizeBilinearQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"ResizeBilinearQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // ResizeBilinear only changes width and height: batch and channel count must match.
    const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
    if (inputBatchSize != outputBatchSize)
    {
        // NOTE(review): the `throw InvalidArgumentException(` wrapper line is elided in this extract.
            boost::str(boost::format("%1%: Input batch size (%2%) "
                                     "does not match output batch size (%3%)") %
                                     descriptorName % inputBatchSize % outputBatchSize));
    }

    // Channel index depends on the data layout (NCHW vs NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    if (inputChannelCount != outputChannelCount)
    {
        // NOTE(review): the `throw InvalidArgumentException(` wrapper line is elided in this extract.
            boost::str(boost::format("%1%: Input channel count (%2%) "
                                     "does not match output channel count (%3%)") %
                                     descriptorName % inputChannelCount % outputChannelCount));
    }
}
1422 
// Validates a Resize workload: one 4-D input, one 4-D output of matching type;
// only the spatial dimensions may differ between input and output.
void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ResizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Resize only changes width and height: batch and channel count must match.
    const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
    if (inputBatchSize != outputBatchSize)
    {
        // NOTE(review): the `throw InvalidArgumentException(` wrapper line is elided in this extract.
            boost::str(boost::format("%1%: Input batch size (%2%) "
                                     "does not match output batch size (%3%)") %
                                     descriptorName % inputBatchSize % outputBatchSize));
    }

    // Channel index depends on the data layout (NCHW vs NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    if (inputChannelCount != outputChannelCount)
    {
        // NOTE(review): the `throw InvalidArgumentException(` wrapper line is elided in this extract.
            boost::str(boost::format("%1%: Input channel count (%2%) "
                                     "does not match output channel count (%3%)") %
                                     descriptorName % inputChannelCount % outputChannelCount));
    }
}
1471 
// FakeQuantizationQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"FakeQuantizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // The quantization range must be well-formed (min <= max).
    if (m_Parameters.m_Min > m_Parameters.m_Max)
    {
        throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
    }
}
1492 
// InstanceNormalizationQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Instance normalization is element-wise: shape and type must be preserved.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1521 
// L2NormalizationQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"L2NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // L2 normalization is element-wise: shape and type must be preserved.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1553 
// Validates a LogSoftmax workload: one input, one output, matching shape and
// a supported, matching data type.
void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"LogSoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1576 
// Validates a Constant workload: no inputs, one output whose shape matches the
// stored constant tensor.
void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConstantQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 0);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // The constant payload is mandatory.
    if (!m_LayerOutput)
    {
        throw InvalidArgumentException(descriptorName + ": No const input specified.");
    }

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
1607 
// Validates a Reshape workload: input and output must have the same number of
// elements and the same data type; shapes may differ.
void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ReshapeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1635 
// SpaceToBatchNdQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Exactly two spatial dimensions (height, width) are supported.
    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
    }

    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
                                       "dimensions as Block Shape.");
    }

    const TensorShape& inputShape = inputTensorInfo.GetShape();

    // Pad list entries are (before, after) pairs: index 0 for height, index 1 for width.
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);

    // Effective spatial extents after padding is applied.
    const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
                                    widthPad.first + widthPad.second;
    const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
                                     heightPad.first + heightPad.second;

    // The op only rearranges data, so the padded element count must equal the
    // output element count.
    const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
                                          inputShape[dimensionIndices.GetChannelsIndex()];
    const unsigned int numOutputElements = outputTensorInfo.GetNumElements();

    if (numOutputElements != numInputElements)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(numOutputElements) + " elements.");
    }

    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
                                       "divisible by Block Shape in all spatial dimensions");
    }

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1702 
// SpaceToDepthQueueDescriptor::Validate — NOTE(review): the signature line is elided in this extract.
{
    const std::string descriptorName{"SpaceToDepthQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // The op only rearranges data, so the total element count is preserved.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_BlockSize == 0)
    {
        throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
    }

    // Resolve layout-dependent dimension indices (NCHW vs NHWC).
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int wIndex = dimensionIndices.GetWidthIndex();
    const unsigned int hIndex = dimensionIndices.GetHeightIndex();
    const unsigned int cIndex = dimensionIndices.GetChannelsIndex();

    const TensorShape& inputShape = inputTensorInfo.GetShape();
    if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
                                       "by block size in all spatial dimensions");
    }

    const TensorShape& outputShape = outputTensorInfo.GetShape();
    if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
    {
        // NOTE(review): the two string literals below concatenate to "...tensormust..." —
        // a space is missing between "tensor" and "must"; fix the message upstream.
        throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
                                       "must be divisible by the square of block size." );
    }
}
1755 
// Validates a Floor workload: one input of a supported type, and an output
// whose TensorInfo is fully identical to the input's.
void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FloorQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the data-type list is elided in this extract — confirm against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // Floor is element-wise: the whole TensorInfo (shape, type, quantization) must match.
    if (inputTensorInfo != outputTensorInfo)
    {
        throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
    }
}
1781 
// Validates an LSTM workload: 3 inputs (input, outputStateIn, cellStateIn) and
// 4 outputs (scratchBuffer, outputStateOut, cellStateOut, output), plus the
// presence/shape consistency of all weight and bias tensors for the enabled
// feature combination (CIFG, peephole, projection, layer normalisation).
1782 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1783 {
1784  // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1785 
1786  const std::string descriptorName{"LstmQueueDescriptor"};
1787 
1788  // check dimensions of all inputs and outputs
1789  if (workloadInfo.m_InputTensorInfos.size() != 3)
1790  {
1791  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1792  }
1793  if (workloadInfo.m_OutputTensorInfos.size() != 4)
1794  {
1795  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1796  }
1797 
// NOTE(review): the DataType entries of this initializer list (original lines
// 1800-1803) are not visible in this rendered listing.
1798  std::vector<DataType> supportedTypes =
1799  {
1804  };
1805 
1806  // check for supported type of one input and match them with all the other input and output
1807  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1808 
1809  // type matches all other inputs
1810  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1811  {
1812  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1813  workloadInfo.m_InputTensorInfos[i],
1814  descriptorName,
1815  "input_0",
1816  "input_" + std::to_string(i));
1817  }
1818  // type matches all other outputs
1819  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1820  {
1821  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1822  workloadInfo.m_OutputTensorInfos[i],
// NOTE(review): string literal used here instead of descriptorName — harmless
// (same text) but inconsistent with the rest of the function.
1823  "LstmQueueDescriptor",
1824  "input_0",
1825  "output_" + std::to_string(i));
1826  }
1827 
1828  // Making sure clipping parameters have valid values.
1829  // == 0 means no clipping
1830  // > 0 means clipping
1831  if (m_Parameters.m_ClippingThresCell < 0.0f)
1832  {
1833  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
1834  }
1835  if (m_Parameters.m_ClippingThresProj < 0.0f)
1836  {
1837  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
1838  }
1839 
1840 
1841  // Inferring batch size, number of outputs and number of cells from the inputs.
1842  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1843  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1844  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1845  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1846  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1847  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1848 
1849  // input tensor
1850  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1851  descriptorName + " input_0");
1852  // outputStateInTensor
1853  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1854  descriptorName + " input_1");
// NOTE(review): comment below says "outputStateInTensor" but input_2 sized
// n_batch * n_cell is the cell state — comment looks copy-pasted.
1855  // outputStateInTensor
1856  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1857  descriptorName + " input_2");
1858  // scratchBufferTensor
// With CIFG the input gate is fused away, so only 3 gate buffers are needed.
1859  unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1860  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1861  descriptorName + " output_0");
1862  // outputStateOutTensor
1863  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1864  descriptorName + " output_1");
1865  // cellStateOutTensor
1866  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1867  descriptorName + " output_2");
1868  // outputTensor
1869  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1870  descriptorName + " output_3");
1871 
1872 
1873  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1874  if ( m_InputToInputWeights )
1875  {
1876  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
// NOTE(review): the label below says "InputLayerNormWeights" but the tensor
// being checked is m_InputToInputWeights — error text likely copy-pasted;
// confirm against upstream before changing (affects the thrown message only).
1877  (n_cell * n_input), "InputLayerNormWeights");
1878  }
1879 
1880  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1881  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1882  (n_cell * n_input), "InputToForgetWeights");
1883 
1884  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1885  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1886  (n_cell * n_input), "InputToCellWeights");
1887 
1888  if ( m_RecurrentToInputWeights )
1889  {
1890  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1891  (n_cell * n_output), "RecurrentToInputWeights");
1892  }
1893 
1894  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1895  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1896  (n_cell * n_output), "RecurrentToForgetWeights");
1897 
1898  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1899  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1900  (n_cell * n_output), "RecurrentToCellWeights");
1901 
1902  // Make sure the input-gate's parameters are either both present (regular
1903  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1904  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1905  !m_Parameters.m_CifgEnabled) ||
1906  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1907  m_Parameters.m_CifgEnabled));
1908  if (!cifg_weights_all_or_none)
1909  {
1910  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1911  "RecurrentToInputWeights must either both be present (regular LSTM) "
1912  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1913  "accordingly.");
1914  }
1915 
// Peephole weights are optional; validate shapes only when present.
1916  if ( m_CellToInputWeights )
1917  {
1918  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1919  n_cell, "CellToInputWeights");
1920  }
1921  if ( m_CellToForgetWeights )
1922  {
1923  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1924  n_cell, "CellToForgetWeights");
1925  }
1926  if ( m_CellToOutputWeights )
1927  {
1928  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1929  n_cell, "CellToOutputWeights");
1930  }
1931 
1932  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
// CellToInputWeights is exempted when CIFG is enabled (input gate absent).
1933  bool peephole_weights_all_or_none =
1934  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1935  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1936  || ( !m_CellToInputWeights && !m_CellToForgetWeights
1937  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1938  if (!peephole_weights_all_or_none)
1939  {
1940  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1941  }
1942 
1943  // Make sure the input gate bias is present only when not a CIFG-LSTM.
1944  if (m_Parameters.m_CifgEnabled)
1945  {
1946  if (m_InputGateBias)
1947  {
1948  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1949  }
1950  }
1951  else
1952  {
1953  if (!m_InputGateBias)
1954  {
1955  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1956  "must be present.");
1957  }
1958  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1959  n_cell, "InputGateBias");
1960  }
1961 
// The remaining three gate biases are mandatory regardless of configuration.
1962  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1963  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1964 
1965  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1966  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1967 
1968  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1969  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1970 
1971  if (m_ProjectionWeights)
1972  {
1973  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1974  (n_cell * n_output), "ProjectionWeights");
1975  }
1976  if (m_ProjectionBias)
1977  {
1978  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1979  }
1980 
1981  // Making sure the projection tensors are consistent:
1982  // 1) If projection weight is not present, then projection bias should not be
1983  // present.
1984  // 2) If projection weight is present, then projection bias is optional.
1985  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1986  !m_Parameters.m_ProjectionEnabled)
1987  || (m_ProjectionWeights && !m_ProjectionBias &&
1988  m_Parameters.m_ProjectionEnabled)
1989  || (m_ProjectionWeights && m_ProjectionBias &&
1990  m_Parameters.m_ProjectionEnabled));
1991  if (!projecton_tensors_consistent)
1992  {
1993  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1994  }
1995 
1996  // The four layer normalization weights either all have values or none of them have values. Additionally, if
1997  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1998  // either all have values or none of them have values. Layer normalization is used when the values of all the
1999  // layer normalization weights are present
2000  if (m_InputLayerNormWeights)
2001  {
2002  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
2003  }
2004  if (m_ForgetLayerNormWeights)
2005  {
2006  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2007  }
2008  if (m_CellLayerNormWeights)
2009  {
2010  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2011  }
2012  if (m_OutputLayerNormWeights)
2013  {
2014  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2015  }
2016 
2017  if (m_Parameters.m_LayerNormEnabled)
2018  {
2019  if (!m_Parameters.m_CifgEnabled)
2020  {
2021  if (!m_InputLayerNormWeights)
2022  {
2023  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
2024  "disabled but InputLayerNormWeights are not present");
2025  }
2026  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2027  1, n_cell, "InputLayerNormWeights");
2028  }
2029  else if (m_InputLayerNormWeights)
2030  {
2031  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
2032  "enabled");
2033  }
2034 
2035  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2036  "ForgetLayerNormWeights");
2037  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2038 
2039  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2040  "OutputLayerNormWeights");
2041  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2042 
2043  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2044  "CellLayerNormWeights");
2045  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2046  }
2047  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2048  {
2049  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2050  "normalisation weights are present.");
2051  }
2052 }
2053 
2055 {
2056  const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
2057 
2058  ValidateNumInputs(workloadInfo, descriptorName, 1);
2059  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2060 
2061  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2062  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2063 
2064  if (inputTensorInfo.GetDataType() != DataType::BFloat16)
2065  {
2066  throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
2067  }
2068 
2069  if (outputTensorInfo.GetDataType() != DataType::Float32)
2070  {
2071  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2072  }
2073 
2074  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2075 }
2076 
2078 {
2079  const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
2080 
2081  ValidateNumInputs(workloadInfo, descriptorName, 1);
2082  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2083 
2084  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2085  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2086 
2087  if (inputTensorInfo.GetDataType() != DataType::Float32)
2088  {
2089  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2090  }
2091 
2092  if (outputTensorInfo.GetDataType() != DataType::BFloat16)
2093  {
2094  throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
2095  }
2096 
2097  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2098 }
2099 
2101 {
2102  const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
2103 
2104  ValidateNumInputs(workloadInfo, descriptorName, 1);
2105  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2106 
2107  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2108  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2109 
2110  if (inputTensorInfo.GetDataType() != DataType::Float32)
2111  {
2112  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2113  }
2114 
2115  if (outputTensorInfo.GetDataType() != DataType::Float16)
2116  {
2117  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
2118  }
2119 
2120  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2121 }
2122 
2124 {
2125  const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
2126 
2127  ValidateNumInputs(workloadInfo, descriptorName, 1);
2128  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2129 
2130  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2131  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2132 
2133  if (inputTensorInfo.GetDataType() != DataType::Float16)
2134  {
2135  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
2136  }
2137 
2138  if (outputTensorInfo.GetDataType() != DataType::Float32)
2139  {
2140  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2141  }
2142 
2143  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2144 }
2145 
// Validates a Division workload: two inputs, one output, all of a supported
// data type, and the two input shapes must be broadcast-compatible with the
// output shape.
2146 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2147 {
2148  const std::string descriptorName{"DivisionQueueDescriptor"};
2149 
2150  ValidateNumInputs(workloadInfo, descriptorName, 2);
2151  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2152 
2153  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2154  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2155  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2156 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2159-2164) are not visible in this rendered listing.
2157  std::vector<DataType> supportedTypes =
2158  {
2165  };
2166 
2167  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2168  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2169  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2170 
2171  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2172  inputTensorInfo1,
2173  outputTensorInfo,
2174  descriptorName,
2175  "input_0",
2176  "input_1");
2177 }
2178 
2180 {
2181  const std::string descriptorName{"SubtractionQueueDescriptor"};
2182 
2183  ValidateNumInputs(workloadInfo, descriptorName, 2);
2184  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2185 
2186  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2187  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2188  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2189 
2190  std::vector<DataType> supportedTypes =
2191  {
2198  };
2199 
2200  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2201  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2202  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2203 
2204  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2205  inputTensorInfo1,
2206  outputTensorInfo,
2207  descriptorName,
2208  "input_0",
2209  "input_1");
2210 }
2211 
// Validates a Maximum workload: two inputs, one output, all of a supported
// data type, and broadcast-compatible shapes.
2212 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2213 {
2214  const std::string descriptorName{"MaximumQueueDescriptor"};
2215 
2216  ValidateNumInputs(workloadInfo, descriptorName, 2);
2217  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2218 
2219  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2220  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2221  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2222 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2225-2231) are not visible in this rendered listing.
2223  std::vector<DataType> supportedTypes =
2224  {
2232  };
2233 
2234  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2235  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2236  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2237 
2238  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2239  inputTensorInfo1,
2240  outputTensorInfo,
2241  descriptorName,
2242  "input_0",
2243  "input_1");
2244 }
2245 
// Validates a Mean (reduction) workload: one input, one output, matching data
// types, and an output rank consistent with the KeepDims/Axis parameters.
2246 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2247 {
2248  const std::string descriptorName{"MeanQueueDescriptor"};
2249 
2250  ValidateNumInputs(workloadInfo, descriptorName, 1);
2251  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2252 
2253  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2254  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2255 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2258-2263) are not visible in this rendered listing.
2256  std::vector<DataType> supportedTypes =
2257  {
2264  };
2265 
2266  // First check if input tensor data type is supported, then
2267  // check if this data type matches the output tensor data type
2268  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2269  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output")
2270 
// KeepDims: reduced axes stay as size-1 dimensions, so rank is unchanged.
2271  if (m_Parameters.m_KeepDims)
2272  {
2273  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2274  }
// No explicit axes: every dimension is reduced, leaving a rank-1 result.
2275  else if (m_Parameters.m_Axis.empty())
2276  {
2277  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
2278  }
2279  else
2280  {
// Each reduced axis removes one dimension; clamp to a minimum rank of 1.
2281  unsigned int outputDim =
2282  inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
2283  ValidateTensorNumDimensions(outputTensorInfo,
2284  descriptorName,
2285  outputDim > 0 ? outputDim : 1,
2286  "output");
2287  }
2288 }
2289 
2290 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2291 {
2292  const std::string descriptorName{"PadQueueDescriptor"};
2293 
2294  ValidateNumInputs(workloadInfo, descriptorName, 1);
2295  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2296 
2297  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2298  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2299 
2300  // input and output should have the same number of dimensions
2301  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2302 
2303  // there should be entry in the pad list for each dimension in the input tensor
2304  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2305  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2306  "as there are dimensions in the input tensor that is " +
2307  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2308  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2309  }
2310 }
2311 
// Validates a Quantize workload: one input of a supported type and one output
// whose data type must be a quantized type.
2312 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2313 {
2314  const std::string descriptorName{"QuantizeQueueDescriptor"};
2315 
2316  ValidateNumInputs(workloadInfo, descriptorName, 1);
2317  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2318 
2319  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2320  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2321 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2324-2330) are not visible in this rendered listing.
2322  std::vector<DataType> supportedTypes =
2323  {
2331  };
2332 
2333  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2334 
2335  if (!IsQuantizedType(outputTensorInfo.GetDataType()))
2336  {
2337  throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
2338  }
2339 }
2340 
2342 {
2343  const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
2344 
2345  ValidateNumInputs(workloadInfo, descriptorName, 1);
2346  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2347 
2348  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2349  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2350 
2351  std::vector<DataType> supportedTypes =
2352  {
2359  };
2360 
2361  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2362  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2363 }
2364 
2366 {
2367  const std::string descriptorName{"StridedSliceQueueDescriptor"};
2368 
2369  ValidateNumInputs(workloadInfo, descriptorName, 1);
2370  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2371 
2372  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2373  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2374 
2375  std::vector<DataType> supportedTypes =
2376  {
2383  };
2384 
2385  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2386  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2387 
2388  ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2389 
2390  const uint32_t rank = inputTensorInfo.GetNumDimensions();
2391  if (rank > 4)
2392  {
2393  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
2394  }
2395 
2396  // Begin, End & Stride length must be of rank(input0)
2397  if (m_Parameters.m_Begin.size() != rank)
2398  {
2399  throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
2400  }
2401 
2402  if (m_Parameters.m_End.size() != rank)
2403  {
2404  throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
2405  }
2406 
2407  if (m_Parameters.m_Stride.size() != rank)
2408  {
2409  throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
2410  }
2411 
2412  // Stride entries must be non-zero
2413  for (auto& stride : m_Parameters.m_Stride)
2414  {
2415  if (stride == 0)
2416  {
2417  throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2418  }
2419  }
2420 }
2421 
// Validates a Minimum workload: two inputs, one output, all of a supported
// data type, and broadcast-compatible shapes.
2422 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2423 {
2424  const std::string descriptorName{"MinimumQueueDescriptor"};
2425 
2426  ValidateNumInputs(workloadInfo, descriptorName, 2);
2427  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2428 
2429  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2430  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2431  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2432 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2435-2441) are not visible in this rendered listing.
2433  std::vector<DataType> supportedTypes =
2434  {
2442  };
2443 
2444  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2445  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2446  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2447 
2448  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2449  inputTensorInfo1,
2450  outputTensorInfo,
2451  descriptorName,
2452  "input_0",
2453  "input_1");
2454 }
2455 
2456 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2457 {
2458  const std::string descriptorName{"DebugQueueDescriptor"};
2459 
2460  ValidateNumInputs(workloadInfo, descriptorName, 1);
2461  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2462 }
2463 
2464 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2465 {
2466  const std::string descriptorName{"EqualQueueDescriptor"};
2467 
2468  ValidateNumInputs(workloadInfo, descriptorName, 2);
2469  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2470 
2471  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2472  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2473  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2474 
2475  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2476  inputTensorInfo1,
2477  outputTensorInfo,
2478  descriptorName,
2479  "input_0",
2480  "input_1");
2481 
2482  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2483  {
2484  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2485  }
2486 }
2487 
2488 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2489 {
2490  const std::string descriptorName{"GreaterQueueDescriptor"};
2491 
2492  ValidateNumInputs(workloadInfo, descriptorName, 2);
2493  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2494 
2495  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2496  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2497  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2498 
2499  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2500  inputTensorInfo1,
2501  outputTensorInfo,
2502  descriptorName,
2503  "input_0",
2504  "input_1");
2505 
2506  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2507  {
2508  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2509  }
2510 }
2511 
// Validates an Rsqrt (1/sqrt) workload: one input, one output, identical
// shapes, supported and matching data types.
2512 void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2513 {
2514  const std::string descriptorName{"RsqrtQueueDescriptor"};
2515 
2516  ValidateNumInputs(workloadInfo, descriptorName, 1);
2517  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2518 
2519  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2520  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2521 
2522  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2523 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2526-2531) are not visible in this rendered listing.
2524  std::vector<DataType> supportedTypes =
2525  {
2532  };
2533 
2534  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2535  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2536 }
2537 
// Validates a Gather workload: a data input plus an Int32 indices input, one
// output matching the data input's type, with output rank
// rank(input) + rank(indices) - 1.
2538 void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2539 {
2540  const std::string descriptorName{"GatherQueueDescriptor"};
2541 
2542  ValidateNumInputs(workloadInfo, descriptorName, 2);
2543  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2544 
// Indices are always Signed32 regardless of the data input's type.
2545  const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2546  if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2547  {
2548  throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2549  }
2550 
2551  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2552  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2553 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2556-2561) are not visible in this rendered listing.
2554  std::vector<DataType> supportedTypes =
2555  {
2562  };
2563 
2564  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2565 
2566  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2567 
// Gather over axis 0: the indexed dimension is replaced by the full shape of
// the indices tensor, hence rank(in) + rank(indices) - 1.
2568  unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2569  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2570 }
2571 
2573 {
2574  const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2575 
2576  ValidateNumInputs(workloadInfo, descriptorName, 2);
2577 
2578  if (workloadInfo.m_OutputTensorInfos.size() != 4)
2579  {
2580  throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2581  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2582  }
2583 
2584  if (m_Anchors == nullptr)
2585  {
2586  throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2587  }
2588 
2589  const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
2590  const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2591  const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2592 
2593  const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
2594  const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2595  const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2596  const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
2597 
2598  ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2599  ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2600  ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2601 
2602  const std::vector<DataType> supportedInputTypes =
2603  {
2610  };
2611 
2612  ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2613  ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2614  ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2615 
2616  ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2617  ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2618  ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2619  ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2620 
2621  // NOTE: Output is always Float32 regardless of input type
2622  ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2623  ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2624  ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2625  ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
2626 
2627  if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2628  {
2629  throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2630  "must be positive and less than or equal to 1.");
2631  }
2632 
2633  if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2634  {
2635  throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2636  "should be equal to number of classes + 1.");
2637  }
2638 }
2639 
// Validates a Dequantize workload: one quantized-type input and one output of
// a supported (floating-point) type.
2640 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2641 {
2642  const std::string& descriptorName{"DequantizeQueueDescriptor"};
2643 
2644  ValidateNumInputs(workloadInfo, descriptorName, 1);
2645  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2646 
2647  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2648  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2649 
2650  if (!IsQuantizedType(inputTensorInfo.GetDataType()))
2651  {
2652  throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
2653  }
2654 
// NOTE(review): the DataType entries of this initializer list (original lines
// 2657-2659) are not visible in this rendered listing.
2655  std::vector<DataType> supportedTypes =
2656  {
2660  };
2661 
2662  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2663 }
2664 
2665 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2666 {
2667  const std::string& descriptorName{"MergeQueueDescriptor"};
2668 
2669  ValidateNumInputs(workloadInfo, descriptorName, 2);
2670  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2671 
2672  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2673  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2674  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2675 
2676  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2677  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2678 
2679  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2680  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2681 }
2682 
// Validates a Switch workload: two inputs and two outputs. Both inputs and
// both outputs must be of a supported data type, and both output shapes must
// match the shape of input_0.
void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"SwitchQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];

    const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were elided in this source
        // listing; the original file enumerates the supported types here —
        // confirm against the repository.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);

    // Both outputs must have the same shape as input_0 (input_1 is not used
    // as a shape reference here).
    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo0,
                              descriptorName,
                              "input_0",
                              "output_0");

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo1,
                              descriptorName,
                              "input_0",
                              "output_1");
}
2723 
2724 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
2725 {
2726  // This is internally generated so it should not need validation.
2727 }
2728 
2729 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2730 {
2731  const std::string& descriptorName{"PreluQueueDescriptor"};
2732 
2733  ValidateNumInputs(workloadInfo, descriptorName, 2);
2734  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2735 
2736  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2737  const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2738  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2739 
2740  std::vector<DataType> supportedTypes
2741  {
2748  };
2749 
2750  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2751  ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2752 
2753  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2754 
2755  ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
2756  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");
2757 
2758  ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2759  alphaTensorInfo,
2760  outputTensorInfo,
2761  descriptorName,
2762  "input",
2763  "alpha");
2764 }
2765 
{
    // Validates a TransposeConvolution2d workload: one 4D input and one 4D
    // output, a mandatory 4D weight tensor, and an optional bias (only when
    // m_BiasEnabled). Also checks per-axis quantization consistency.
    const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Weights are mandatory; the pointer must be set before validation.
    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        // Bias data type is derived from the input type (e.g. Signed32 for
        // quantized inputs — see GetBiasDataType at the top of this file).
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were elided in this source
        // listing; the original file enumerates the supported input types
        // here — confirm against the repository.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2817 
2818 void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2819 {
2820  const std::string descriptorName{"TransposeQueueDescriptor"};
2821 
2822  ValidateNumInputs(workloadInfo, descriptorName, 1);
2823  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2824 
2825  const PermutationVector& mapping = m_Parameters.m_DimMappings;
2826 
2827  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2828  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2829 
2830  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
2831  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
2832 
2833  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
2834  {
2835  if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
2836  {
2837  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
2838  " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
2839  "must match dst dimension " + to_string(i) +
2840  " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
2841  }
2842  }
2843 
2844  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2845 }
2846 
// Validates a QLSTM (quantized LSTM) workload: 3 inputs (input, outputStateIn,
// cellStateIn) and 3 outputs (outputStateOut, cellStateOut, output), plus the
// mandatory gate weight/bias tensors and the optional CIFG, peephole,
// layer-normalization and projection parameter sets. Batch count, input size,
// output size and number of units are inferred from the input/state shapes,
// and every weight/bias tensor is checked for dimension count, element count
// and data-type consistency. Throws InvalidArgumentException on the first
// violated constraint.
void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QLstmQueueDescriptor"};

    // Validate number of inputs/outputs
    ValidateNumInputs(workloadInfo, descriptorName, 3);
    ValidateNumOutputs(workloadInfo, descriptorName, 3);

    // Input/output tensor info
    auto inputInfo = workloadInfo.m_InputTensorInfos[0];
    auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
    auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];

    auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
    auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
    auto outputInfo = workloadInfo.m_OutputTensorInfos[2];

    // Supported types for various tensors in QLSTM
    // NOTE(review): the DataType entries in the five lists below were elided
    // by this source listing; the original file enumerates the concrete
    // types — confirm against the repository.
    std::vector<DataType> inputOutputSupportedTypes =
    {
    };

    std::vector<DataType> cellStateSupportedTypes =
    {
    };

    std::vector<DataType> weightsSupportedTypes =
    {
    };

    std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
    {
    };

    std::vector<DataType> biasSupportedTypes =
    {
    };

    // Validate types of input/output tensors
    ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);

    ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);

    // Validate matching types of input/output tensors
    ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
                                 "outputStateIn", "outputStateOut");
    ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");

    // Infer number of batches, number of units, input size and output size from tensor dimensions
    const uint32_t numBatches = inputInfo.GetShape()[0];
    const uint32_t inputSize  = inputInfo.GetShape()[1];
    const uint32_t outputSize = outputStateInInfo.GetShape()[1];
    const uint32_t numUnits = cellStateInInfo.GetShape()[1];

    // Validate number of dimensions and number of elements for input/output tensors
    ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
    ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
    ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");

    ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
    ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
    ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");

    // Validate number of dimensions and number of elements for MANDATORY weight tensors
    ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
    auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
    auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");

    ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
    auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");

    ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
    auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
                                " RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
    auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");

    ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
    auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
    // NOTE(review): the label below says " RecurrentToCellWeights" but this
    // line validates RecurrentToOutputWeights — looks like a copy/paste slip
    // in the error message; confirm and correct upstream.
    ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");

    // Validate data types for MANDATORY weights tensors (all should match each other)
    ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "inputToCellWeights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "inputToOutputWeights");

    // NOTE(review): "recurrentToForgeteights" below is a misspelled label
    // ("recurrentToForgetWeights") in the error message.
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToForgeteights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToCellWeights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToOutputWeights");

    // Validate number of dimensions and number of elements for MANDATORY bias tensors
    ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
    auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");

    ValidatePointer(m_CellBias, descriptorName, "CellBias");
    auto cellBiasInfo = m_CellBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");

    ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
    auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");

    // Validate data types for MANDATORY bias tensors
    ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
                                 "forgetGateBias", "cellBias");
    ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
                                 "forgetGateBias", "outputGateBias");

    // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
    // Either all three CIFG tensors are present with CIFG disabled, or none
    // are present with CIFG enabled — anything else is inconsistent.
    const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
                                             !m_Parameters.m_CifgEnabled) ||
                                            (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
                                             !m_InputGateBias && m_Parameters.m_CifgEnabled));

    if (!allCifgParamsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
            "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
            "set appropriately.");
    }

    if (!m_Parameters.m_CifgEnabled)
    {
        // Validate number of dimensions and number of elements
        auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");

        auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
                                    " RecurrentToInputWeights");

        auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
        ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");

        // Validate data types
        ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
                                     "inputToForgetWeights", "inputToInputWeights");
        ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
                                     "inputToForgetWeights", "recurrentToInputWeights");
        ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
                                     "forgetGateBias", "inputGateBias");
    }

    // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
    // CellToInputWeights is only required when peephole is enabled AND CIFG
    // is disabled (hence the "|| m_CifgEnabled" in the first clause).
    bool allPeepholeWeightsPresentOrNot =
        (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
          && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
         || (!m_CellToInputWeights && !m_CellToForgetWeights
             && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));

    if (!allPeepholeWeightsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
            "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
            "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
            "appropriately.");
    }

    if (m_Parameters.m_PeepholeEnabled)
    {
        auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
        ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);

        auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
        ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
                                     "cellToForgetWeight", "cellToOutputWeights");

        if (!m_Parameters.m_CifgEnabled)
        {
            auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
            ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
            ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
                                         "cellToForgetWeights", "cellToInputWeights");
        }
    }

    // Validate OPTIONAL params: Layer Norm Weights
    // InputLayerNormWeights is only required when layer norm is enabled AND
    // CIFG is disabled (same pattern as the peephole check above).
    bool allLayerNormWeightsPresentOrNot =
        (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
          && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
         || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
             && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));

    if (!allLayerNormWeightsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights "
            "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
            "be present at all (Layer Norm disabled). InputLayerNormWeights should "
            "only be present when Layer Norm is enabled and CIFG is disabled. "
            "m_Parameters.m_LayerNormEnabled should be set appropriately.");
    }

    if (m_Parameters.m_LayerNormEnabled)
    {
        auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
        ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);

        auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
        ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
                                     "forgetLayerNormWeights", "cellLayerNormWeights");

        auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
        ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
                                     "forgetLayerNormWeights", "outputLayerNormWeights");

        if (!m_Parameters.m_CifgEnabled)
        {
            auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
            ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
            ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
                                         "forgetLayerNormWeights", "inputLayerNormWeights");
        }
    }

    // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
    // ProjectionBias is optional even when projection is enabled.
    bool correctProjectionTensorsPresent =
        ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
         (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
         (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));

    if (!correctProjectionTensorsPresent)
    {
        throw InvalidArgumentException(descriptorName +
            ": If projection is enabled, ProjectionWeights should be present and "
            "ProjectionBias is optional. If projection is disabled, neither "
            "ProjectionWeights nor ProjectionBias should be present.");
    }

    if (m_Parameters.m_ProjectionEnabled)
    {
        auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
        ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);

        if (m_ProjectionBias)
        {
            auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
            ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
            ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
        }

    }
    // NOTE(review): this throws only when BOTH the scale AND the offset
    // mismatch (&&), while the message implies either mismatch should throw
    // (||) — confirm the intended operator.
    else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
             outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
        throw InvalidArgumentException(descriptorName +
            ": If projection is disabled, output quantization info (scale, offset) "
            "should match HiddenStateScale and HiddenStateZeroPoint.");
    }

}
3132 
{
    // Validates a QuantizedLstm workload: 3 inputs (input, cellStateIn,
    // outputStateIn) and 2 outputs (cellStateOut, outputStateOut). All 8 gate
    // weight tensors and 4 gate bias tensors are mandatory; they are checked
    // for dimension count, element count, matching data types and matching
    // quantization info. Throws InvalidArgumentException on the first
    // violated constraint.
    const std::string descriptorName{"QuantizedLstmQueueDescriptor"};

    // Validate number of inputs/outputs
    ValidateNumInputs(workloadInfo, descriptorName, 3);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    // Input/output tensor infos
    auto inputInfo = workloadInfo.m_InputTensorInfos[0];
    auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
    auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];

    auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
    auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];

    // NOTE(review): the DataType entries in the four lists below were elided
    // by this source listing; the original file enumerates the concrete
    // types — confirm against the repository.
    std::vector<DataType> inputOutputSupportedTypes =
    {
    };

    std::vector<DataType> cellStateSupportedTypes =
    {
    };

    std::vector<DataType> weightsSupportedTypes =
    {
    };

    std::vector<DataType> biasSupportedTypes =
    {
    };

    // Validate types of input/output tensors
    ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);

    ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);

    // Validate matching types of input/output tensors
    ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
                                 "outputStateIn", "outputStateOut");
    ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");

    // Validate matching quantization info for input/output tensors
    ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
    ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");

    // Infer number of batches, input size and output size from tensor dimensions
    const uint32_t numBatches = inputInfo.GetShape()[0];
    const uint32_t inputSize  = inputInfo.GetShape()[1];
    const uint32_t outputSize = cellStateInInfo.GetShape()[1];

    // Validate number of dimensions and number of elements for input/output tensors
    ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
    ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
    ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
    ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
    ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");

    // Validate number of dimensions and number of elements for weights tensors
    ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
    auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");

    ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
    auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
    auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");

    ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
    auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");

    ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
    auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");

    ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
    auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
                                " RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
    auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");

    ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
    auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
    // NOTE(review): the label below says " RecurrentToCellWeights" but this
    // line validates RecurrentToOutputWeights — looks like a copy/paste slip
    // in the error message; confirm and correct upstream.
    ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");

    // Validate data types for weights tensors (all should match each other)
    ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
                                 "inputToInputWeights", "inputToForgetWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
                                 "inputToInputWeights", "inputToCellWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
                                 "inputToInputWeights", "inputToOutputWeights");

    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToInputWeights");
    // NOTE(review): "recurrentToForgeteights" below is a misspelled label
    // ("recurrentToForgetWeights") in the error message.
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToForgeteights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToCellWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToOutputWeights");

    // Validate matching quantization info for weight tensors (all should match each other)
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
                                    descriptorName, "inputToInputWeights", "inputToForgetWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
                                    descriptorName, "inputToInputWeights", "inputToCellWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
                                    descriptorName, "inputToInputWeights", "inputToOutputWeights");

    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
                                    descriptorName, "inputToInputWeights", "recurrentToInputWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
                                    descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
                                    descriptorName, "inputToInputWeights", "recurrentToCellWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
                                    descriptorName, "inputToInputWeights", "recurrentToOutputWeights");

    // Validate number of dimensions and number of elements in bias tensors
    ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
    auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");

    ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
    auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");

    ValidatePointer(m_CellBias, descriptorName, "CellBias");
    auto cellBiasInfo = m_CellBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");

    ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
    auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");

    // Validate data types for bias tensors (all should match each other)
    ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
                                 "inputGateBias", "forgetGateBias");
    ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
                                 "inputGateBias", "cellBias");
    ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
                                 "inputGateBias", "outputGateBias");

    // Validate bias tensor quantization info
    ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
    ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
    ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
    ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
}
3303 
// Validates an Abs workload: exactly one input and one output tensor, with
// identical shapes and identical data types, and an input type drawn from the
// supported list below.
3304 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3305 {
3306  const std::string descriptorName{"AbsQueueDescriptor"};
3307 
     // Abs is a unary op: require exactly 1 input and 1 output.
3308  ValidateNumInputs(workloadInfo, descriptorName, 1);
3309  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3310 
3311  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3312  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3313 
     // Elementwise op: output shape must equal input shape.
3314  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3315 
     // NOTE(review): the entries of this list (original lines 3318-3324) are
     // elided in this rendering of the file — see the original source for the
     // exact set of supported DataTypes.
3316  std::vector<DataType> supportedTypes =
3317  {
3325  };
3326 
     // Only the input's type is checked against the list; the output merely
     // has to match the input's type.
3327  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3328  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3329 }
3330 
3331 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3332 {
3333  const std::string descriptorName{"SliceQueueDescriptor"};
3334 
3335  ValidateNumInputs(workloadInfo, descriptorName, 1);
3336  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3337 
3338  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3339  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3340 
3341  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3342 
3343  const unsigned int rank = inputTensorInfo.GetNumDimensions();
3344  if (rank > 4)
3345  {
3346  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3347  }
3348 
3349  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3350 
3351  // Check if m_Begin and m_Size have the expected length
3352  if (m_Parameters.m_Begin.size() != rank)
3353  {
3354  throw InvalidArgumentException(descriptorName +
3355  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3356  }
3357  if (m_Parameters.m_Size.size() != rank)
3358  {
3359  throw InvalidArgumentException(descriptorName +
3360  ": Length of size descriptor must equal rank " + std::to_string(rank));
3361  }
3362 
3363  // Check if the shape of the output tensor matches m_Size
3364  const TensorShape& outputShape = outputTensorInfo.GetShape();
3365  for (unsigned int i = 0u; i < rank; ++i)
3366  {
3367  if (m_Parameters.m_Size[i] != outputShape[i])
3368  {
3369  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3370  }
3371  }
3372 
3373  // Check if the sum of begin offset and size in a given dimension
3374  // does not exceed the size of corresponding input
3375  const TensorShape& inputShape = inputTensorInfo.GetShape();
3376  for(unsigned int i = 0u; i < rank; ++i)
3377  {
3378  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3379  {
3380  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3381  std::to_string(i) + " exceeds input size.");
3382  }
3383  }
3384 }
3385 
3387 {
3388  const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3389 
3390  ValidateNumInputs(workloadInfo, descriptorName, 1);
3391  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3392 
3393  const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3394  const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3395 
3396  ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3397  ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3398 
3399  std::vector<DataType> supportedTypes =
3400  {
3407  };
3408 
3409  ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3410  ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3411 
3412  ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3413 
3414  if (m_Parameters.m_BlockSize == 0)
3415  {
3416  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3417  }
3418 
3419  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3420  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3421  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3422  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3423 
3424  const TensorShape& outputShape = outputInfo.GetShape();
3425  if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3426  {
3427  throw InvalidArgumentException(descriptorName + ": Output width and height shape"
3428  "must be divisible by block size.");
3429  }
3430 
3431  const TensorShape& inputShape = inputInfo.GetShape();
3432  if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3433  {
3434  throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
3435  "must be divisible by the square of block size." );
3436  }
3437 }
3438 
3439 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3440 {
3441  const std::string descriptorName{"ComparisonQueueDescriptor"};
3442 
3443  ValidateNumInputs(workloadInfo, descriptorName, 2);
3444  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3445 
3446  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3447  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3448  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3449 
3450  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3451  inputTensorInfo1,
3452  outputTensorInfo,
3453  descriptorName,
3454  "input_0",
3455  "input_1");
3456 
3457  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3458  {
3459  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3460  }
3461 }
3462 
// Validates an ElementwiseUnary workload: exactly one input and one output
// tensor, with identical shapes and identical data types, and an input type
// drawn from the supported list below.
// NOTE(review): the function signature (original line 3463) is elided in this
// rendering of the file.
3464 {
3465  const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3466 
     // Unary op: require exactly 1 input and 1 output.
3467  ValidateNumInputs(workloadInfo, descriptorName, 1);
3468  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3469 
3470  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3471  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3472 
     // Elementwise op: output shape must equal input shape.
3473  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3474 
     // NOTE(review): the entries of this list (original lines 3477-3483) are
     // elided in this rendering of the file — see the original source for the
     // exact set of supported DataTypes.
3475  std::vector<DataType> supportedTypes =
3476  {
3484  };
3485 
     // Only the input's type is checked against the list; the output merely
     // has to match the input's type.
3486  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3487  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3488 }
3489 
3490 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:219
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:236
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:233
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:281
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2020 ARM Limited.
void Validate(const WorkloadInfo &workloadInfo) const
SizeType GetSize() const
Definition: Types.hpp:202
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:238
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:98
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:168
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:241
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:32
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:265
float GetQuantizationScale() const
Definition: Tensor.cpp:248
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
Definition: Tensor.hpp:95
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
Contains information about inputs and outputs to a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
unsigned int GetChannelsIndex() const
bool IsQuantized() const
Definition: Tensor.cpp:291
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:93
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin