// ArmNN 20.02 — WorkloadData.cpp (extracted from the generated documentation listing)
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
10 
11 #include <algorithm>
12 #include <iomanip>
13 #include <string>
14 #include <sstream>
15 
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
18 
19 using namespace armnnUtils;
20 
21 namespace armnn
22 {
23 
//---------------------------------------------------------------
// Maps a layer's input data type to the data type expected for its bias tensor:
// floating-point inputs keep their own type, quantized 8/16-bit inputs use a
// 32-bit signed integer bias.
// NOTE(review): the function signature line (original line 25) was elided from
// this listing — in the ArmNN source this is GetBiasDataType(DataType inputDataType);
// confirm against the repository before compiling.
{
    switch (inputDataType)
    {
        // Floating-point inputs: bias uses the same floating-point type.
        case DataType::BFloat16:
            return DataType::BFloat16;
        case DataType::Float16:
            return DataType::Float16;
        case DataType::Float32:
            return DataType::Float32;
        // Quantized inputs: bias is accumulated in 32-bit signed integers.
        case DataType::QAsymmS8:
            return DataType::Signed32;
        case DataType::QAsymmU8:
            return DataType::Signed32;
        case DataType::QSymmS8:
            return DataType::Signed32;
        case DataType::QSymmS16:
            return DataType::Signed32;
        default:
            // Unreachable for valid inputs; asserts in debug builds and falls
            // back to Float32 in release builds.
            BOOST_ASSERT_MSG(false, "Invalid input data type");
            return DataType::Float32;
    }
}
48 
49 namespace
50 {
51 
//---------------------------------------------------------------
// Local replacement for std::to_string: the Android NDK does not provide it,
// so values are formatted through an output string stream instead.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
61 
62 //---------------------------------------------------------------
63 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
64 {
65  if (!ptr)
66  {
67  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
68  paramName + " parameter must be set.");
69  }
70 }
71 
72 //---------------------------------------------------------------
73 void ValidateTensorShapesMatch(const TensorInfo& first,
74  const TensorInfo& second,
75  std::string const& descName,
76  std::string const& firstName,
77  std::string const& secondName)
78 {
79  if (first.GetShape() != second.GetShape())
80  {
81  throw InvalidArgumentException(descName + ": "
82  + firstName + " & " + secondName + " must have identical shapes");
83  }
84 }
85 
86 //---------------------------------------------------------------
87 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
88 {
89  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
90  {
91  throw InvalidArgumentException(descName +
92  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
93  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
94  }
95 }
96 
97 //---------------------------------------------------------------
98 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
99 {
100  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
101  {
102  throw InvalidArgumentException(descName +
103  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
104  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
105  }
106 }
107 
108 //---------------------------------------------------------------
109 void ValidateTensorNumDimensions(const TensorInfo& tensor,
110  std::string const& descName,
111  unsigned int numDimensions,
112  std::string const& tensorName)
113 {
114  if (tensor.GetNumDimensions() != numDimensions)
115  {
116  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
117  to_string(tensor.GetNumDimensions()) + " dimensions for " +
118  tensorName + " tensor.");
119  }
120 }
121 
122 //---------------------------------------------------------------
123 void ValidateTensorNumElements(const TensorInfo& tensor,
124  std::string const& descName,
125  unsigned int numElements,
126  std::string const& tensorName)
127 {
128  if (tensor.GetNumElements() != numElements)
129  {
130  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
131  to_string(tensor.GetNumElements()) + " elements for " +
132  tensorName + " tensor.");
133  }
134 }
135 
136 //---------------------------------------------------------------
137 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
138  unsigned int numDimension,
139  unsigned int numElements,
140  std::string const& tensorName)
141 {
142  const std::string functionName{"ValidateTensorNumDimNumElem"};
143  ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
144  ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
145 }
146 
147 //---------------------------------------------------------------
148 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
149  const std::string& descName, std::string const& tensorName)
150 {
151  if (tensor.GetDataType() != dataType)
152  {
153  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
154  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
155  }
156 }
157 
// Checks that the tensor's data type supports the per-axis quantization scheme:
// symmetric signed 8-bit, either the current QSymmS8 or the deprecated
// QuantizedSymm8PerAxis enumerator.
// NOTE(review): lines around this check (original lines 160/168, presumably
// ARMNN_NO_DEPRECATE_WARN_BEGIN/END guards for the deprecated enumerator) were
// elided from this listing — confirm against the repository source.
void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
{
    if (tensor.GetDataType() != DataType::QSymmS8 &&
        tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
    {
        throw InvalidArgumentException(descName +
            ": Expected data type which supports per-axis quantization scheme but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
170 
171 //---------------------------------------------------------------
172 void ValidateTensorQuantizationSpace(const TensorInfo& first,
173  const TensorInfo& second,
174  const std::string& descName,
175  std::string const& firstName,
176  std::string const& secondName)
177 {
178  if (!first.IsQuantized() ||
179  !second.IsQuantized())
180  {
181  // Not a quantized type, ignore the validation
182  return;
183  }
184 
185  DataType firstDataType = first.GetDataType();
186  DataType secondDataType = second.GetDataType();
187 
188  if (firstDataType != secondDataType)
189  {
190  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
191  " must be of the same quantized type, " +
192  firstName + " is " + GetDataTypeName(firstDataType) + ", " +
193  secondName + " is " + GetDataTypeName(secondDataType));
194  }
195 
196  if (!first.IsTypeSpaceMatch(second))
197  {
198  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
199  " must have the same quantization space, " +
200  firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
201  " and scale " + to_string(first.GetQuantizationScale()) + ", " +
202  secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
203  " and scale " + to_string(second.GetQuantizationScale()));
204  }
205 }
206 
207 //---------------------------------------------------------------
208 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
209  const TensorInfo& inputTensorInfo,
210  const TensorInfo& weightsTensorInfo,
211  const std::string& descName)
212 {
213  // Helper lambda function to validate a single bias quantization scale value
214  auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
215  {
216  constexpr float tolerance = 0.000001f;
217  if (std::abs(biasScale - expectedScale) > tolerance)
218  {
219  // Print the float values with extra precision to see very small differences
220  std::stringstream msg;
221  msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
222  " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
223  biasScale;
224  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
225  }
226  };
227 
228  if (biasTensor.GetQuantizationOffset() != 0)
229  {
230  throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
231  to_string(biasTensor.GetQuantizationOffset()));
232  }
233 
234  if (biasTensor.HasMultipleQuantizationScales())
235  {
236  // Validate per-axis quantization scales
237  const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
238  const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();
239 
240  if (weightScales.size() != biasScales.size())
241  {
242  std::stringstream msg;
243  msg << descName << ": Expected matchhing number of per-axis quantization scales, but got different "
244  << "values: weights=" << weightScales.size() << ", biases=" << biasScales.size();
245  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
246  }
247 
248  for (size_t i = 0ul; i < biasScales.size(); ++i)
249  {
250  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
251  VerifyBiasQuantizationScale(biasScales[i], expectedScale);
252  }
253  }
254  else
255  {
256  // Validate per-tensor quantization scale
257  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
258  VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
259  }
260 }
261 
262 //---------------------------------------------------------------
263 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
264  unsigned int numExpected,
265  const std::string& descName,
266  const std::string& varName)
267 {
268  if (vec.empty() && numExpected > 0)
269  {
270  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
271  }
272 
273  for (unsigned int i = 0; i < numExpected; ++i)
274  {
275  if (!vec[i])
276  {
277  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
278  }
279  }
280 }
281 
282 //---------------------------------------------------------------
283 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
284  const TensorInfo& second,
285  const TensorInfo& output,
286  std::string const& descName,
287  std::string const& firstName,
288  std::string const& secondName)
289 {
290  // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
291  // broadcasted.
292  if (first.GetNumDimensions() != second.GetNumDimensions())
293  {
294  throw InvalidArgumentException(descName + ": Tensors "
295  + firstName + " & " + secondName
296  + " must have the same number of dimensions in order to be broadcasted");
297  }
298  uint32_t numDims = first.GetNumDimensions();
299  std::vector<uint32_t> outputDims(numDims, 0u);
300  for (uint32_t i = 0; i < numDims; i++)
301  {
302  const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
303  const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
304  if (dimsNotEqual && dimsNotOne)
305  {
306  throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
307  }
308  outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
309  }
310  TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
311  if (broadcastShape != output.GetShape())
312  {
313  throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
314  + firstName + " & " + secondName
315  + " does not match the output shape");
316  }
317 }
318 
319 //---------------------------------------------------------------
320 void ValidateDataTypes(const TensorInfo& info,
321  const std::vector<armnn::DataType>& supportedTypes,
322  std::string const& descName)
323 {
324  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
325  if (iterator == supportedTypes.end())
326  {
327  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
328  }
329 }
330 
331 //---------------------------------------------------------------
332 void ValidateTensorDataTypesMatch(const TensorInfo& first,
333  const TensorInfo& second,
334  std::string const& descName,
335  std::string const& firstName,
336  std::string const& secondName)
337 {
338  if (first.GetDataType() != second.GetDataType())
339  {
340  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
341  " must have identical data types.");
342  }
343 }
344 
345 //---------------------------------------------------------------
346 void ValidateTensorNumElementsMatch(const TensorInfo& first,
347  const TensorInfo& second,
348  std::string const& descName,
349  std::string const& firstName,
350  std::string const& secondName)
351 {
352  if (first.GetNumElements() != second.GetNumElements())
353  {
354  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
355  " must have the same number of elements.");
356  }
357 }
358 
// Checks that the weight tensor's data type is compatible with the input's:
// quantized 8-bit inputs accept any of the 8-bit quantized weight types
// (including the deprecated per-axis enumerator); any other input type requires
// the weight type to match it exactly.
// NOTE(review): lines around the deprecated enumerator (original 366/374,
// presumably ARMNN_NO_DEPRECATE_WARN_BEGIN/END guards) were elided from this
// listing — confirm against the repository source.
void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        const std::vector<DataType> validTypes =
        {
            DataType::QAsymmU8,
            DataType::QAsymmS8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        // Non-quantized input: weights must use the identical data type.
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}
383 
384 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
385  const std::string& descName,
386  const std::string& tensorName)
387 {
388  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
389  if (!quantizationDim.has_value())
390  {
391  throw InvalidArgumentException(boost::str(
392  boost::format("%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
393  % descName % tensorName));
394  }
395 
396  if (quantizationDim.value() != 0)
397  {
398  throw InvalidArgumentException(boost::str(
399  boost::format("%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, "
400  "but got: %3%") % descName % tensorName % quantizationDim.value()));
401  }
402 }
403 
404 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
405  const std::string& descName,
406  const std::string& tensorName)
407 {
408  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
409  if (quantizationOffset != 0)
410  {
411  throw InvalidArgumentException(boost::str(
412  boost::format("%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, "
413  "but got: %3%") % descName % tensorName % quantizationOffset));
414  }
415 }
416 
// Top-level per-axis quantization check for a weighted layer. If the weight
// tensor carries per-axis parameters, verifies that:
//  - the input/output data types support per-axis quantization (quantized
//    8-bit, and identical on input and output);
//  - the weight tensor's type, quantization dimension and offset are valid;
//  - when a bias is present, it is also per-axis quantized, Signed32, with a
//    valid quantization dimension and zero offset.
// Tensors without per-axis parameters pass through untouched.
void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        // Per-axis quantization is only defined for quantized 8-bit layers whose
        // input and output use the same data type.
        const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(boost::str(
                boost::format("%1%: Per-axis quantization parameters set on tensor %2%, "
                "but data type does not support per-axis quantization.") % descName % "weight"));
        }


        // Weight tensor: type, quantization dimension, and offset checks.
        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            // Per-axis weights require per-axis biases.
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(boost::str(
                    boost::format("%1%: Per-axis quantization parameters not set on bias tensor, despite being set on "
                    "weight tensor.") % descName));
            }

            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
458 
459 } // anonymous namespace
460 
// Validates that this descriptor carries the expected number of input and
// output tensor handles and that none of them is null (delegates to
// ValidateTensors for each array).
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
467 
468 //---------------------------------------------------------------
469 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
470 {
471  const std::string descriptorName{"MemCopyQueueDescriptor"};
472 
473  ValidateNumInputs(workloadInfo, descriptorName, 1);
474  ValidateNumOutputs(workloadInfo, descriptorName , 1);
475 
476  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
477  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
478 
479  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
480  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
481 
482  if (m_Inputs.size() != m_Outputs.size())
483  {
484  throw InvalidArgumentException(boost::str(
485  boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
486  descriptorName % m_Inputs.size() % m_Outputs.size()));
487  }
488 
489  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
490  {
491  if (!m_Inputs[i])
492  {
493  throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
494  descriptorName % i));
495  }
496 
497  if (!m_Outputs[i])
498  {
499  throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
500  descriptorName % i));
501  }
502  }
503 }
504 
//---------------------------------------------------------------
// Validates a MemImport workload: exactly one input/output tensor-info pair
// with matching element counts, and exactly one non-null input handle paired
// with a non-null output handle.
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);

    // NOTE(review): this re-checks the count that ValidateNumInputs above
    // already enforced — presumably kept for a distinct error message.
    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) is not 1.")
            % workloadInfo.m_InputTensorInfos.size()));

    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
            % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Each imported tensor must hold the same number of elements as its output.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                % i ));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) is not 1.")
            % m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
            % m_Inputs.size() % m_Outputs.size()));
    }

    // Tensor handles themselves must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
564 
565 //---------------------------------------------------------------
566 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
567 {
568  ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
569  ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
570 
571  if (m_Inputs.size() != 1)
572  {
573  throw InvalidArgumentException(boost::str(
574  boost::format("Number of inputs (%1%) is not 1.")
575  % m_Inputs.size()));
576  }
577 
578  if (m_Outputs.size() != 0)
579  {
580  throw InvalidArgumentException(boost::str(
581  boost::format("Number of outputs (%1%) is not 0.")
582  % m_Inputs.size() % m_Outputs.size()));
583  }
584 
585  if (!m_Inputs[0])
586  {
587  throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
588  }
589 }
590 
//---------------------------------------------------------------
// Validates an Activation workload: one input and one output with a supported
// data type, identical data types, and identical shapes.
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the supported DataType entries (original lines 604-609)
        // were elided from this listing — restore from the repository source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
616 
// Validates an ArgMinMax workload: one input, one Signed32 output, and an
// output shape equal to the input shape with the reduction axis removed.
void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The output holds indices, so it must always be Int32.
    if (outputTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32.");
    }

    std::vector<DataType> supportedInputTypes =
    {
        // NOTE(review): the supported DataType entries (original lines 634-639)
        // were elided from this listing — restore from the repository source.
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    auto inputNumDimensions = inputShape.GetNumDimensions();
    // Normalise a possibly negative axis parameter to an unsigned index.
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        // NOTE(review): this only throws when BOTH the rank differs from 1 AND
        // the first extent differs from 1 — confirm '&&' (rather than '||') is
        // the intended condition.
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        // Dimensions before the reduction axis must be unchanged.
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        // Dimensions after the axis shift down by one in the output.
        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
679 
// Validates a Softmax workload: one input and one output with a supported data
// type, identical data types, and identical shapes.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the supported DataType entries (original lines 692-697)
        // were elided from this listing — restore from the repository source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
704 
// Validates a Splitter workload: one input split into one or more outputs, with
// one view-origin window per output; each window must have the input's rank and
// must lie entirely within the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the supported DataType entries (original lines 714-720)
        // were elided from this listing — restore from the repository source.
    };

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the 'throw InvalidArgumentException(' opening of this
        // statement (original line 740) was elided from this listing; as shown
        // the expression below is incomplete. Restore from the repository source.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_OutputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                "have the same dimensionality as the input tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the input "
                "tensor has " +
                to_string(inputDims) + " dimensions.");
        }
        // Each window (origin + output extent) must fit inside the input tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
775 
// Validates a Concat workload: one or more inputs merged into one output, with
// one view-origin window per input; each window must have the output's rank and
// must lie entirely within the output tensor. When concatenating on the inner
// dimension the per-window checks are skipped (early return).
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    // Concatenating on the innermost dimension: window checks are not needed.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        // NOTE(review): the 'throw InvalidArgumentException(' opening of this
        // statement (original line 812) was elided from this listing; as shown
        // the expression below is incomplete. Restore from the repository source.
        descriptorName + ": Number of split windows "
        "has to match number of workloadInfo.m_InputTensorInfos. "
        "Number of windows: " +
        to_string(m_ViewOrigins.size()) +
        ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                "have the same dimensionality as the output tensor. "
                "Window origin (index: " +
                to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                " dimensions, the output "
                "tensor has " +
                to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                    "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the supported DataType entries (original lines 851-857)
        // were elided from this listing — restore from the repository source.
    };

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
870 
// Validates a Stack workload: N inputs of identical shape (as declared in the
// parameters) stacked along a new axis into a single output whose shape is the
// input shape with the stack dimension (of extent N) inserted at m_Axis.
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
            "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    // Dimensions before the stack axis are unchanged.
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                "match shape inferred from input tensor.");
        }
    }

    // The stack axis itself has one entry per input tensor.
    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
            "match shape inferred from input tensor.");
    }

    // Dimensions after the stack axis are the input dimensions shifted up by one.
    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the supported DataType entries (original lines 939-945)
        // were elided from this listing — restore from the repository source.
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // All inputs must share the first input's data type...
    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    // ...and so must the output.
    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
965 
{
    // NOTE(review): the defining signature line
    // (void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Fully connected output is always rank 2: [batch, output units].
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
    }

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        // Validates type and quantization values.
        const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);

        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    // Check the supported data types
    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1014 
{
    // NOTE(review): the defining signature line
    // (void NormalizationQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Check the supported data types
    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Normalization is elementwise: output shape must equal input shape.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1041 
//---------------------------------------------------------------
// Validates an elementwise Addition workload: two inputs of matching data
// type whose shapes must be broadcast-compatible with the output shape.
// Throws InvalidArgumentException on any violated constraint.
void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"AdditionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    // Shapes need not be identical, only broadcastable onto the output.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
1077 
{
    // NOTE(review): the defining signature line
    // (void MultiplicationQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"MultiplicationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    // Shapes need not be identical, only broadcastable onto the output.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
1113 
{
    // NOTE(review): the defining signature line
    // (void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"BatchNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // The four per-channel parameter tensors are mandatory constant inputs.
    ValidatePointer(m_Mean, descriptorName, "mean");
    ValidatePointer(m_Variance, descriptorName, "variance");
    ValidatePointer(m_Beta, descriptorName, "beta");
    ValidatePointer(m_Gamma, descriptorName, "gamma");

    const TensorInfo& mean = m_Mean->GetTensorInfo();
    const TensorInfo& variance = m_Variance->GetTensorInfo();
    const TensorInfo& beta = m_Beta->GetTensorInfo();
    const TensorInfo& gamma = m_Gamma->GetTensorInfo();

    // All parameter tensors must be 1-D and the same length (one value per channel).
    ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
    ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
    ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
    ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");

    ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
    ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
    ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
}
1158 
{
    // NOTE(review): the defining signature line
    // (void Convolution2dQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"Convolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    // Bias is optional; when enabled its type and quantization must be
    // consistent with the input and weight tensors.
    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1211 
1213 {
1214  const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1215 
1216  ValidateNumInputs(workloadInfo, descriptorName, 1);
1217  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1218 
1219  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1220  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1221 
1222  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1223  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1224 
1225  ValidatePointer(m_Weight, descriptorName, "weight");
1226 
1227  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1228  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1229 
1230  if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1231  {
1233  boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
1234  "cannot be smaller than 1.") % descriptorName %
1235  m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1236  }
1237 
1238  const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1239 
1240  // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1241  // inputChannels * channelMultiplier should be equal to outputChannels.
1242  const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1243  const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
1244  const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1245  if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1246  {
1248  boost::str(boost::format("%1%: output_channels (provided %2%) should be "
1249  "equal to input_channels (provided %3%) multiplied by channel_multiplier "
1250  "(provided %4%).") % descriptorName % numWeightOutputChannels %
1251  numWeightInputChannels % numWeightChannelMultiplier));
1252  }
1253 
1254  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1255 
1256  Optional<TensorInfo> optionalBiasTensorInfo;
1257  if (m_Parameters.m_BiasEnabled)
1258  {
1259  ValidatePointer(m_Bias, descriptorName, "bias");
1260 
1261  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1262  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1263 
1264  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1265  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1266  }
1267  ValidatePerAxisQuantization(inputTensorInfo,
1268  outputTensorInfo,
1269  weightTensorInfo,
1270  optionalBiasTensorInfo,
1271  descriptorName);
1272 
1273  std::vector<DataType> supportedTypes =
1274  {
1281  };
1282 
1283  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1284  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1285 }
1286 
1287 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1288 {
1289  const std::string descriptorName{"PermuteQueueDescriptor"};
1290 
1291  ValidateNumInputs(workloadInfo, descriptorName, 1);
1292  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1293 
1294  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1295 
1296  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1297  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1298 
1299  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1300  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1301 
1302  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1303  {
1304  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1305  {
1306  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1307  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1308  "must match dst dimension " + to_string(mapping[i]) +
1309  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1310  }
1311  }
1312 
1313  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1314 }
1315 
//---------------------------------------------------------------
// Validates a 2D pooling workload: one 4-D input, one 4-D output, matching
// data types drawn from the supported set.
void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"Pooling2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1342 
{
    // NOTE(review): the defining signature line
    // (void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"ResizeBilinearQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // ResizeBilinear only changes width and height: batch and channel count must match.
    const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
    if (inputBatchSize != outputBatchSize)
    {
        // NOTE(review): the leading "throw InvalidArgumentException(" line of
        // this statement was lost in extraction; restore it from upstream.
        boost::str(boost::format("%1%: Input batch size (%2%) "
                                 "does not match output batch size (%3%)") %
                                 descriptorName % inputBatchSize % outputBatchSize));
    }

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    if (inputChannelCount != outputChannelCount)
    {
        // NOTE(review): the leading "throw InvalidArgumentException(" line of
        // this statement was lost in extraction; restore it from upstream.
        boost::str(boost::format("%1%: Input channel count (%2%) "
                                 "does not match output channel count (%3%)") %
                                 descriptorName % inputChannelCount % outputChannelCount));
    }
}
1390 
//---------------------------------------------------------------
// Validates a Resize workload: 4-D input and output, supported matching data
// types, and unchanged batch and channel dimensions (only width/height may
// differ).
void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ResizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Resize only changes width and height: batch and channel count must match.
    const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
    if (inputBatchSize != outputBatchSize)
    {
        // NOTE(review): the leading "throw InvalidArgumentException(" line of
        // this statement was lost in extraction; restore it from upstream.
        boost::str(boost::format("%1%: Input batch size (%2%) "
                                 "does not match output batch size (%3%)") %
                                 descriptorName % inputBatchSize % outputBatchSize));
    }

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    if (inputChannelCount != outputChannelCount)
    {
        // NOTE(review): the leading "throw InvalidArgumentException(" line of
        // this statement was lost in extraction; restore it from upstream.
        boost::str(boost::format("%1%: Input channel count (%2%) "
                                 "does not match output channel count (%3%)") %
                                 descriptorName % inputChannelCount % outputChannelCount));
    }
}
1439 
{
    // NOTE(review): the defining signature line
    // (void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"FakeQuantizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Fake quantization operates on rank-2 tensors only.
    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // The quantization interval must be well-formed (min <= max).
    if (m_Parameters.m_Min > m_Parameters.m_Max)
    {
        throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
    }
}
1460 
{
    // NOTE(review): the defining signature line
    // (void InstanceNormalizationQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Elementwise operation: output shape must equal input shape.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1489 
{
    // NOTE(review): the defining signature line
    // (void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"L2NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Elementwise operation: output shape must equal input shape.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1520 
//---------------------------------------------------------------
// Validates a LogSoftmax workload: single input/output with identical shapes
// and matching supported data types.
void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"LogSoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1543 
//---------------------------------------------------------------
// Validates a Constant workload: no inputs, one output whose shape matches
// the stored constant tensor, with a supported output data type.
void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConstantQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 0);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // The constant data itself is mandatory.
    if (!m_LayerOutput)
    {
        throw InvalidArgumentException(descriptorName + ": No const input specified.");
    }

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");

    // Check the supported data types
    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
1574 
//---------------------------------------------------------------
// Validates a Reshape workload: input and output must hold the same number of
// elements (any shapes), with matching supported data types.
void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ReshapeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Only the element count must be preserved, not the shape.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Check the supported data types
    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1602 
{
    // NOTE(review): the defining signature line
    // (void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
    }

    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
                                       "dimensions as Block Shape.");
    }

    const TensorShape& inputShape = inputTensorInfo.GetShape();

    // Pad list order: [0] pads height, [1] pads width (before/after pairs).
    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);

    // Spatial extents after padding is applied.
    const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
                                    widthPad.first + widthPad.second;
    const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
                                     heightPad.first + heightPad.second;

    // The op only rearranges data, so padded-input and output element counts
    // must agree.
    const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
                                          inputShape[dimensionIndices.GetChannelsIndex()];
    const unsigned int numOutputElements = outputTensorInfo.GetNumElements();

    if (numOutputElements != numInputElements)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(numOutputElements) + " elements.");
    }

    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
                                       "divisible by Block Shape in all spatial dimensions");
    }

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1668 
{
    // NOTE(review): the defining signature line
    // (void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo&) const)
    // was dropped by the doc extraction.
    const std::string descriptorName{"SpaceToDepthQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Pure data rearrangement: the element count is preserved.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_BlockSize == 0)
    {
        throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
    }

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int wIndex = dimensionIndices.GetWidthIndex();
    const unsigned int hIndex = dimensionIndices.GetHeightIndex();
    const unsigned int cIndex = dimensionIndices.GetChannelsIndex();

    const TensorShape& inputShape = inputTensorInfo.GetShape();
    if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
                                       "by block size in all spatial dimensions");
    }

    const TensorShape& outputShape = outputTensorInfo.GetShape();
    if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
    {
        // NOTE(review): these adjacent literals concatenate without a space,
        // producing "...output tensormust be divisible..."; a space is missing
        // at the end of the first literal (string left unchanged here).
        throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
                                       "must be divisible by the square of block size." );
    }
}
1720 
//---------------------------------------------------------------
// Validates a Floor workload: one input of a supported type, and an output
// TensorInfo that must be identical to the input's (shape, type and
// quantization), since floor is applied elementwise in place.
void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FloorQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): DataType entries elided by the extraction; restore upstream.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // Full TensorInfo equality, not just shape/type matching.
    if (inputTensorInfo != outputTensorInfo)
    {
        throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
    }
}
1746 
1747 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1748 {
1749  // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1750 
1751  const std::string descriptorName{"LstmQueueDescriptor"};
1752 
1753  // check dimensions of all inputs and outputs
1754  if (workloadInfo.m_InputTensorInfos.size() != 3)
1755  {
1756  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1757  }
1758  if (workloadInfo.m_OutputTensorInfos.size() != 4)
1759  {
1760  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1761  }
1762 
1763  std::vector<DataType> supportedTypes =
1764  {
1769  };
1770 
1771  // check for supported type of one input and match them with all the other input and output
1772  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1773 
1774  // type matches all other inputs
1775  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1776  {
1777  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1778  workloadInfo.m_InputTensorInfos[i],
1779  descriptorName,
1780  "input_0",
1781  "input_" + std::to_string(i));
1782  }
1783  // type matches all other outputs
1784  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1785  {
1786  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1787  workloadInfo.m_OutputTensorInfos[i],
1788  "LstmQueueDescriptor",
1789  "input_0",
1790  "output_" + std::to_string(i));
1791  }
1792 
1793  // Making sure clipping parameters have valid values.
1794  // == 0 means no clipping
1795  // > 0 means clipping
1796  if (m_Parameters.m_ClippingThresCell < 0.0f)
1797  {
1798  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
1799  }
1800  if (m_Parameters.m_ClippingThresProj < 0.0f)
1801  {
1802  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
1803  }
1804 
1805 
1806  // Inferring batch size, number of outputs and number of cells from the inputs.
1807  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1808  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1809  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1810  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1811  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1812  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1813 
1814  // input tensor
1815  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1816  descriptorName + " input_0");
1817  // outputStateInTensor
1818  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1819  descriptorName + " input_1");
1820  // outputStateInTensor
1821  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1822  descriptorName + " input_2");
1823  // scratchBufferTensor
1824  unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1825  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1826  descriptorName + " output_0");
1827  // outputStateOutTensor
1828  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1829  descriptorName + " output_1");
1830  // cellStateOutTensor
1831  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1832  descriptorName + " output_2");
1833  // outputTensor
1834  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1835  descriptorName + " output_3");
1836 
1837 
1838  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1839  if ( m_InputToInputWeights )
1840  {
1841  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1842  (n_cell * n_input), "InputLayerNormWeights");
1843  }
1844 
1845  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1846  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1847  (n_cell * n_input), "InputToForgetWeights");
1848 
1849  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1850  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1851  (n_cell * n_input), "InputToCellWeights");
1852 
1853  if ( m_RecurrentToInputWeights )
1854  {
1855  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1856  (n_cell * n_output), "RecurrentToInputWeights");
1857  }
1858 
1859  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1860  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1861  (n_cell * n_output), "RecurrentToForgetWeights");
1862 
1863  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1864  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1865  (n_cell * n_output), "RecurrentToCellWeights");
1866 
1867  // Make sure the input-gate's parameters are either both present (regular
1868  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1869  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1870  !m_Parameters.m_CifgEnabled) ||
1871  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1872  m_Parameters.m_CifgEnabled));
1873  if (!cifg_weights_all_or_none)
1874  {
1875  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1876  "RecurrentToInputWeights must either both be present (regular LSTM) "
1877  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1878  "accordingly.");
1879  }
1880 
1881  if ( m_CellToInputWeights )
1882  {
1883  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1884  n_cell, "CellToInputWeights");
1885  }
1886  if ( m_CellToForgetWeights )
1887  {
1888  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1889  n_cell, "CellToForgetWeights");
1890  }
1891  if ( m_CellToOutputWeights )
1892  {
1893  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1894  n_cell, "CellToOutputWeights");
1895  }
1896 
1897  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1898  bool peephole_weights_all_or_none =
1899  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1900  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1901  || ( !m_CellToInputWeights && !m_CellToForgetWeights
1902  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1903  if (!peephole_weights_all_or_none)
1904  {
1905  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1906  }
1907 
1908  // Make sure the input gate bias is present only when not a CIFG-LSTM.
1909  if (m_Parameters.m_CifgEnabled)
1910  {
1911  if (m_InputGateBias)
1912  {
1913  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1914  }
1915  }
1916  else
1917  {
1918  if (!m_InputGateBias)
1919  {
1920  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1921  "must be present.");
1922  }
1923  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1924  n_cell, "InputGateBias");
1925  }
1926 
1927  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1928  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1929 
1930  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1931  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1932 
1933  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1934  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1935 
1936  if (m_ProjectionWeights)
1937  {
1938  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1939  (n_cell * n_output), "ProjectionWeights");
1940  }
1941  if (m_ProjectionBias)
1942  {
1943  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1944  }
1945 
1946  // Making sure the projection tensors are consistent:
1947  // 1) If projection weight is not present, then projection bias should not be
1948  // present.
1949  // 2) If projection weight is present, then projection bias is optional.
1950  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1951  !m_Parameters.m_ProjectionEnabled)
1952  || (m_ProjectionWeights && !m_ProjectionBias &&
1953  m_Parameters.m_ProjectionEnabled)
1954  || (m_ProjectionWeights && m_ProjectionBias &&
1955  m_Parameters.m_ProjectionEnabled));
1956  if (!projecton_tensors_consistent)
1957  {
1958  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1959  }
1960 
1961  // The four layer normalization weights either all have values or none of them have values. Additionally, if
1962  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1963  // either all have values or none of them have values. Layer normalization is used when the values of all the
1964  // layer normalization weights are present
1965  if (m_InputLayerNormWeights)
1966  {
1967  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1968  }
1969  if (m_ForgetLayerNormWeights)
1970  {
1971  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1972  }
1973  if (m_CellLayerNormWeights)
1974  {
1975  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1976  }
1977  if (m_OutputLayerNormWeights)
1978  {
1979  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1980  }
1981 
1982  if (m_Parameters.m_LayerNormEnabled)
1983  {
1984  if (!m_Parameters.m_CifgEnabled)
1985  {
1986  if (!m_InputLayerNormWeights)
1987  {
1988  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
1989  "disabled but InputLayerNormWeights are not present");
1990  }
1991  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1992  1, n_cell, "InputLayerNormWeights");
1993  }
1994  else if (m_InputLayerNormWeights)
1995  {
1996  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
1997  "enabled");
1998  }
1999 
2000  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
2001  "ForgetLayerNormWeights");
2002  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
2003 
2004  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
2005  "OutputLayerNormWeights");
2006  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
2007 
2008  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
2009  "CellLayerNormWeights");
2010  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
2011  }
2012  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2013  {
2014  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
2015  "normalisation weights are present.");
2016  }
2017 }
2018 
2020 {
2021  const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
2022 
2023  ValidateNumInputs(workloadInfo, descriptorName, 1);
2024  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2025 
2026  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2027  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2028 
2029  if (inputTensorInfo.GetDataType() != DataType::Float32)
2030  {
2031  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2032  }
2033 
2034  if (outputTensorInfo.GetDataType() != DataType::Float16)
2035  {
2036  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
2037  }
2038 
2039  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2040 }
2041 
2043 {
2044  const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
2045 
2046  ValidateNumInputs(workloadInfo, descriptorName, 1);
2047  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2048 
2049  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2050  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2051 
2052  if (inputTensorInfo.GetDataType() != DataType::Float16)
2053  {
2054  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
2055  }
2056 
2057  if (outputTensorInfo.GetDataType() != DataType::Float32)
2058  {
2059  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2060  }
2061 
2062  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2063 }
2064 
// Validates an element-wise division workload: two inputs and one output,
// all of a supported data type, with broadcast-compatible shapes.
void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DivisionQueueDescriptor"};

    // Exactly two inputs (dividend, divisor) and one output.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    // All three tensors must use a supported data type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // The inputs may broadcast against each other; the output must carry
    // the broadcast result shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2096 
// NOTE(review): the signature line for this function was lost in extraction;
// from the descriptor name below it is SubtractionQueueDescriptor::Validate.
// Validates an element-wise subtraction workload: two inputs and one output
// of a supported data type, with broadcast-compatible shapes.
{
    const std::string descriptorName{"SubtractionQueueDescriptor"};

    // Exactly two inputs (minuend, subtrahend) and one output.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    // All three tensors must use a supported data type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // The inputs may broadcast against each other; the output must carry
    // the broadcast result shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2128 
// Validates an element-wise maximum workload: two inputs and one output of
// a supported data type, with broadcast-compatible shapes.
void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MaximumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    // All three tensors must use a supported data type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // The inputs may broadcast against each other; the output must carry
    // the broadcast result shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2162 
// Validates a mean-reduction workload: one input and one output of the same
// data type, with the output rank determined by the KeepDims/Axis settings.
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MeanQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    // First check if input tensor data type is supported, then
    // check if this data type matches the output tensor data type
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_KeepDims)
    {
        // Reduced dimensions are retained (as size 1), so rank is unchanged.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        // No axes supplied: reduce over everything down to a single dimension.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
    }
    else
    {
        // One dimension is removed per reduced axis, but the output keeps at
        // least rank 1 even if every axis is reduced away.
        unsigned int outputDim =
            inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(outputTensorInfo,
                                    descriptorName,
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
2205 
2206 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2207 {
2208  const std::string descriptorName{"PadQueueDescriptor"};
2209 
2210  ValidateNumInputs(workloadInfo, descriptorName, 1);
2211  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2212 
2213  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2214  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2215 
2216  // input and output should have the same number of dimensions
2217  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2218 
2219  // there should be entry in the pad list for each dimension in the input tensor
2220  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2221  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2222  "as there are dimensions in the input tensor that is " +
2223  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2224  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2225  }
2226 }
2227 
// Validates a quantize workload: one input of a supported type and one
// output that must be of a quantized data type.
void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QuantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // The defining property of the layer: its output must be quantized.
    if (!IsQuantizedType(outputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
    }
}
2256 
// NOTE(review): the signature line for this function was lost in extraction;
// from the descriptor name below it is BatchToSpaceNdQueueDescriptor::Validate.
// Validates a batch-to-space workload: one input and one output of the same,
// supported data type.
{
    const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    // Input must be a supported type and the output type must match it.
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2279 
// NOTE(review): the signature line for this function was lost in extraction;
// from the descriptor name below it is StridedSliceQueueDescriptor::Validate.
// Validates a strided-slice workload: one input (rank <= 4) and one output
// of the same type and quantization, with Begin/End/Stride vectors sized to
// the input rank and all strides non-zero.
{
    const std::string descriptorName{"StridedSliceQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Slicing must not alter the quantization scale/offset.
    ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    const uint32_t rank = inputTensorInfo.GetNumDimensions();
    if (rank > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Begin, End & Stride length must be of rank(input0)
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
    }

    // Stride entries must be non-zero
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
        }
    }
}
2335 
// Validates an element-wise minimum workload: two inputs and one output of
// a supported data type, with broadcast-compatible shapes.
void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MinimumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    // All three tensors must use a supported data type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // The inputs may broadcast against each other; the output must carry
    // the broadcast result shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2368 
2369 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2370 {
2371  const std::string descriptorName{"DebugQueueDescriptor"};
2372 
2373  ValidateNumInputs(workloadInfo, descriptorName, 1);
2374  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2375 }
2376 
2377 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2378 {
2379  const std::string descriptorName{"EqualQueueDescriptor"};
2380 
2381  ValidateNumInputs(workloadInfo, descriptorName, 2);
2382  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2383 
2384  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2385  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2386  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2387 
2388  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2389  inputTensorInfo1,
2390  outputTensorInfo,
2391  descriptorName,
2392  "input_0",
2393  "input_1");
2394 
2395  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2396  {
2397  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2398  }
2399 }
2400 
2401 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2402 {
2403  const std::string descriptorName{"GreaterQueueDescriptor"};
2404 
2405  ValidateNumInputs(workloadInfo, descriptorName, 2);
2406  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2407 
2408  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2409  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2410  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2411 
2412  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2413  inputTensorInfo1,
2414  outputTensorInfo,
2415  descriptorName,
2416  "input_0",
2417  "input_1");
2418 
2419  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2420  {
2421  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2422  }
2423 }
2424 
// Validates a reciprocal-square-root workload: one input and one output of
// matching shape and (supported) data type.
void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"RsqrtQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Element-wise operation: shapes must match exactly.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2449 
// Validates a gather workload: input 0 holds the data, input 1 the Int32
// indices; the output rank is rank(data) + rank(indices) - 1.
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GatherQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // Indices are always Signed32, independent of the data tensor's type.
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    if (indicesTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
    }

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Each index selects a slice of the data tensor, so the output rank is
    // the combined rank of both inputs minus the indexed dimension.
    unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
2482 
// NOTE(review): the signature line for this function was lost in extraction;
// from the descriptor name below it is
// DetectionPostProcessQueueDescriptor::Validate.
// Validates a detection post-processing (SSD-style NMS) workload: two inputs
// (box encodings, class scores), an anchors tensor supplied through the
// descriptor, and exactly four Float32 outputs (boxes, classes, scores,
// number of detections).
{
    // NOTE(review): const& bound to a temporary string (lifetime-extended);
    // most other Validate() functions in this file use a plain value.
    const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);

    // Outputs are checked by hand to produce a more descriptive message.
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    // Anchors come from the descriptor, not from the workload inputs.
    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo =  workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo       =  workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();

    const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];

    ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");

    const std::vector<DataType> supportedInputTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);

    ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");

    // NOTE: Output is always Float32 regardless of input type
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");

    // IoU is a ratio, so the NMS threshold must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // The scores tensor carries one extra class column for the background.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException(descriptorName + ": Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
2549 
// Validates a dequantize workload: the input must be of a quantized data
// type and the output of a supported (non-quantized) type.
void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): const& bound to a temporary string (lifetime-extended);
    // most other Validate() functions in this file use a plain value.
    const std::string& descriptorName{"DequantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The defining property of the layer: its input must be quantized.
    if (!IsQuantizedType(inputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
    }

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
2574 
2575 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2576 {
2577  const std::string& descriptorName{"MergeQueueDescriptor"};
2578 
2579  ValidateNumInputs(workloadInfo, descriptorName, 2);
2580  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2581 
2582  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2583  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2584  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2585 
2586  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2587  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2588 
2589  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2590  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2591 }
2592 
// Validates a switch workload: two inputs and two outputs of a supported
// data type; both outputs must have the same shape as input 0.
void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): const& bound to a temporary string (lifetime-extended);
    // most other Validate() functions in this file use a plain value.
    const std::string& descriptorName{"SwitchQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];

    const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);

    // Both branches of the switch carry the data tensor (input 0) through,
    // so each output must match its shape.
    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo0,
                              descriptorName,
                              "input_0",
                              "output_0");

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo1,
                              descriptorName,
                              "input_0",
                              "output_1");
}
2632 
2633 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
2634 {
2635  // This is internally generated so it should not need validation.
2636 }
2637 
// Validates a PReLU workload: a data input and an alpha (slope) input of
// matching, supported data type, broadcastable against each other, with the
// output carrying the broadcast result shape.
void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): const& bound to a temporary string (lifetime-extended);
    // most other Validate() functions in this file use a plain value.
    const std::string& descriptorName{"PreluQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& alphaTensorInfo  = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes
    {
        // NOTE(review): the DataType entries were lost when this listing was
        // extracted — restore them from the original WorkloadData.cpp.
    };

    ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
    ValidateDataTypes(alphaTensorInfo,  supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
    // NOTE(review): "ouptut" below is a typo in the emitted error label —
    // consider fixing to "output" in a behavioral change.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");

    // Alpha may broadcast against the input; the output must carry the
    // broadcast result shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo,
                                       alphaTensorInfo,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input",
                                       "alpha");
}
2673 
// Validates a TransposeConvolution2d workload: 4D input/output tensors, a 4D
// weight tensor whose data type matches the input, an optional bias checked
// against the inferred bias type and quantization, and per-axis quantization
// consistency. (The function signature line is not visible in this rendering;
// the supportedTypes initialiser below also appears truncated.)
2675 {
2676  const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2677 
2678  ValidateNumInputs(workloadInfo, descriptorName, 1);
2679  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2680 
2681  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2682  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2683 
     // Transpose convolution operates on rank-4 tensors only.
2684  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2685  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2686 
     // Weights are mandatory and must also be rank-4.
2687  ValidatePointer(m_Weight, descriptorName, "weight");
2688 
2689  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2690  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2691 
2692  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2693 
     // Bias is only checked when the descriptor enables it.
2694  Optional<TensorInfo> optionalBiasTensorInfo;
2695  if (m_Parameters.m_BiasEnabled)
2696  {
2697  ValidatePointer(m_Bias, descriptorName, "bias");
2698 
2699  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2700  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
2701 
     // Bias type is derived from the input type (e.g. Signed32 for quantized inputs).
2702  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
2703  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2704  }
2705 
2706  ValidatePerAxisQuantization(inputTensorInfo,
2707  outputTensorInfo,
2708  weightTensorInfo,
2709  optionalBiasTensorInfo,
2710  descriptorName);
2711 
2712  std::vector<DataType> supportedTypes =
2713  {
2719  };
2720 
2721  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2722  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2723 }
2724 
2725 void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2726 {
2727  const std::string descriptorName{"TransposeQueueDescriptor"};
2728 
2729  ValidateNumInputs(workloadInfo, descriptorName, 1);
2730  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2731 
2732  const PermutationVector& mapping = m_Parameters.m_DimMappings;
2733 
2734  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2735  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2736 
2737  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
2738  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
2739 
2740  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
2741  {
2742  if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
2743  {
2744  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
2745  " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
2746  "must match dst dimension " + to_string(i) +
2747  " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
2748  }
2749  }
2750 
2751  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2752 }
2753 
// Validates a QuantizedLstm workload: 3 inputs (input, cellStateIn,
// outputStateIn) and 2 outputs (cellStateOut, outputStateOut); checks tensor
// data types, matching quantization spaces, shapes inferred from the input and
// cell-state dimensions, and the presence/shape/type/quantization of all 8
// weight tensors and 4 bias tensors.
// (The function signature line and the contents of the four supported-type
// initialiser lists are not visible in this rendering.)
2755 {
2756  const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
2757 
2758  // Validate number of inputs/outputs
2759  ValidateNumInputs(workloadInfo, descriptorName, 3);
2760  ValidateNumOutputs(workloadInfo, descriptorName, 2);
2761 
2762  // Input/output tensor infos
2763  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2764  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
2765  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
2766 
2767  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2768  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2769 
2770  std::vector<DataType> inputOutputSupportedTypes =
2771  {
2773  };
2774 
2775  std::vector<DataType> cellStateSupportedTypes =
2776  {
2778  };
2779 
2780  std::vector<DataType> weightsSupportedTypes =
2781  {
2783  };
2784 
2785  std::vector<DataType> biasSupportedTypes =
2786  {
2788  };
2789 
2790  // Validate types of input/output tensors
2791  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2792  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2793  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2794 
2795  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2796  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2797 
2798  // Validate matching types of input/output tensors
2799  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2800  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2801  "outputStateIn", "outputStateOut");
2802  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2803 
2804  // Validate matching quantization info for input/output tensors
2805  ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2806  ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
2807  ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2808 
2809  // Infer number of batches, input size and output size from tensor dimensions
2810  const uint32_t numBatches = inputInfo.GetShape()[0];
2811  const uint32_t inputSize = inputInfo.GetShape()[1];
2812  const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2813 
2814  // Validate number of dimensions and number of elements for input/output tensors
2815  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2816  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
2817  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2818  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
2819  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2820 
2821  // Validate number of dimensions and number of elements for weights tensors
2822  ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
2823  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2824  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
2825 
2826  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2827  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2828  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
2829 
2830  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2831  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2832  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
2833 
2834  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2835  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2836  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
2837 
2838  ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
2839  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2840  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
2841 
2842  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2843  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2844  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2845  " RecurrentToForgetWeights");
2846 
2847  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2848  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2849  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2850 
2851  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2852  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
     // NOTE(review): the label below says " RecurrentToCellWeights" but the tensor
     // being validated is recurrentToOutputWeightsInfo — copy/paste error in the
     // runtime diagnostic text; fix upstream (doc-only change cannot alter it).
2853  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2854 
2855  // Validate data types for weights tensors (all should match each other)
2856  ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2857 
2858  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2859  "inputToInputWeights", "inputToForgetWeights");
2860  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2861  "inputToInputWeights", "inputToCellWeights");
2862  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2863  "inputToInputWeights", "inputToOutputWeights");
2864 
2865  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2866  "inputToInputWeights", "recurrentToInputWeights");
     // NOTE(review): "recurrentToForgeteights" below is a typo in the runtime
     // diagnostic label (should be "recurrentToForgetWeights"); fix upstream.
2867  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2868  "inputToInputWeights", "recurrentToForgeteights");
2869  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2870  "inputToInputWeights", "recurrentToCellWeights");
2871  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2872  "inputToInputWeights", "recurrentToOutputWeights");
2873 
2874  // Validate matching quantization info for weight tensors (all should match each other)
2875  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2876  descriptorName, "inputToInputWeights", "inputToForgetWeights");
2877  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2878  descriptorName, "inputToInputWeights", "inputToCellWeights");
2879  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2880  descriptorName, "inputToInputWeights", "inputToOutputWeights");
2881 
2882  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2883  descriptorName, "inputToInputWeights", "recurrentToInputWeights");
2884  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2885  descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
2886  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2887  descriptorName, "inputToInputWeights", "recurrentToCellWeights");
2888  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2889  descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
2890 
2891  // Validate number of dimensions and number of elements in bias tensors
2892  ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
2893  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2894  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
2895 
2896  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
2897  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2898  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
2899 
2900  ValidatePointer(m_CellBias, descriptorName, "CellBias");
2901  auto cellBiasInfo = m_CellBias->GetTensorInfo();
2902  ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
2903 
2904  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
2905  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2906  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
2907 
2908  // Validate data types for bias tensors (all should match each other)
2909  ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2910 
2911  ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2912  "inputGateBias", "forgetGateBias");
2913  ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2914  "inputGateBias", "cellBias");
2915  ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2916  "inputGateBias", "outputGateBias");
2917 
2918  // Validate bias tensor quantization info
     // All biases are checked against the input and the input-to-input weights'
     // quantization parameters.
2919  ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2920  ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2921  ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2922  ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2923 }
2924 
// Validates an Abs workload: one input and one output with identical shape
// and identical (supported) data type.
// NOTE(review): the supportedTypes initialiser below appears truncated in this
// rendering — confirm the entries against the repository source.
2925 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2926 {
2927  const std::string descriptorName{"AbsQueueDescriptor"};
2928 
2929  ValidateNumInputs(workloadInfo, descriptorName, 1);
2930  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2931 
2932  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2933  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2934 
     // Abs is element-wise, so the output shape must equal the input shape.
2935  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2936 
2937  std::vector<DataType> supportedTypes =
2938  {
2944  };
2945 
2946  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2947  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2948 }
2949 
2950 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2951 {
2952  const std::string descriptorName{"SliceQueueDescriptor"};
2953 
2954  ValidateNumInputs(workloadInfo, descriptorName, 1);
2955  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2956 
2957  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2958  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2959 
2960  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2961 
2962  const unsigned int rank = inputTensorInfo.GetNumDimensions();
2963  if (rank > 4)
2964  {
2965  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
2966  }
2967 
2968  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
2969 
2970  // Check if m_Begin and m_Size have the expected length
2971  if (m_Parameters.m_Begin.size() != rank)
2972  {
2973  throw InvalidArgumentException(descriptorName +
2974  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
2975  }
2976  if (m_Parameters.m_Size.size() != rank)
2977  {
2978  throw InvalidArgumentException(descriptorName +
2979  ": Length of size descriptor must equal rank " + std::to_string(rank));
2980  }
2981 
2982  // Check if the shape of the output tensor matches m_Size
2983  const TensorShape& outputShape = outputTensorInfo.GetShape();
2984  for (unsigned int i = 0u; i < rank; ++i)
2985  {
2986  if (m_Parameters.m_Size[i] != outputShape[i])
2987  {
2988  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
2989  }
2990  }
2991 
2992  // Check if the sum of begin offset and size in a given dimension
2993  // does not exceed the size of corresponding input
2994  const TensorShape& inputShape = inputTensorInfo.GetShape();
2995  for(unsigned int i = 0u; i < rank; ++i)
2996  {
2997  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
2998  {
2999  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3000  std::to_string(i) + " exceeds input size.");
3001  }
3002  }
3003 }
3004 
// Validates a DepthToSpace workload: 4D input/output of a supported type with
// equal element counts, a non-zero block size, output height/width divisible
// by the block size, and input depth divisible by blockSize^2.
// (The function signature line is not visible in this rendering; the
// supportedTypes initialiser below also appears truncated.)
3006 {
3007  const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
3008 
3009  ValidateNumInputs(workloadInfo, descriptorName, 1);
3010  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3011 
3012  const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
3013  const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
3014 
3015  ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
3016  ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
3017 
3018  std::vector<DataType> supportedTypes =
3019  {
3025  };
3026 
3027  ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3028  ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3029 
     // DepthToSpace rearranges data without adding or dropping elements.
3030  ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
3031 
3032  if (m_Parameters.m_BlockSize == 0)
3033  {
3034  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
3035  }
3036 
     // Resolve W/H/C positions for the configured data layout (NCHW or NHWC).
3037  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
3038  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
3039  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
3040  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
3041 
3042  const TensorShape& outputShape = outputInfo.GetShape();
3043  if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3044  {
3045  throw InvalidArgumentException(descriptorName + ": Output width and height shape"
3046  "must be divisible by block size.");
3047  }
3048 
3049  const TensorShape& inputShape = inputInfo.GetShape();
3050  if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3051  {
3052  throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
3053  "must be divisible by the square of block size." );
3054  }
3055 }
3056 
3057 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3058 {
3059  const std::string descriptorName{"ComparisonQueueDescriptor"};
3060 
3061  ValidateNumInputs(workloadInfo, descriptorName, 2);
3062  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3063 
3064  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3065  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3066  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3067 
3068  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3069  inputTensorInfo1,
3070  outputTensorInfo,
3071  descriptorName,
3072  "input_0",
3073  "input_1");
3074 
3075  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3076  {
3077  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3078  }
3079 }
3080 
// Validates an ElementwiseUnary workload: one input and one output with
// identical shape and identical (supported) data type.
// (The function signature line is not visible in this rendering; the
// supportedTypes initialiser below also appears truncated.)
3082 {
3083  const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3084 
3085  ValidateNumInputs(workloadInfo, descriptorName, 1);
3086  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3087 
3088  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3089  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3090 
     // Element-wise unary ops preserve the input shape exactly.
3091  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3092 
3093  std::vector<DataType> supportedTypes =
3094  {
3100  };
3101 
3102  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3103  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3104 }
3105 
3106 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:218
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:236
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:232
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:280
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2020 ARM Limited.
void Validate(const WorkloadInfo &workloadInfo) const
SizeType GetSize() const
Definition: Types.hpp:202
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:237
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:98
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:168
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:241
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:32
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:264
float GetQuantizationScale() const
Definition: Tensor.cpp:247
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
Definition: Tensor.hpp:95
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
Contains information about inputs and outputs to a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
unsigned int GetChannelsIndex() const
bool IsQuantized() const
Definition: Tensor.cpp:290
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:93
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin