// ArmNN (NotReleased)
// WorkloadData.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
10 
11 #include <algorithm>
12 #include <iomanip>
13 #include <string>
14 #include <sstream>
15 
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
18 
19 using namespace armnnUtils;
20 
21 namespace armnn
22 {
23 
24 //---------------------------------------------------------------
26 {
27  switch (inputDataType)
28  {
29  case DataType::Float16:
30  return DataType::Float16;
31  case DataType::Float32:
32  return DataType::Float32;
33  case DataType::QAsymmS8:
34  return DataType::Signed32;
35  case DataType::QAsymmU8:
36  return DataType::Signed32;
37  case DataType::QSymmS8:
38  return DataType::Signed32;
39  case DataType::QSymmS16:
40  return DataType::Signed32;
41  default:
42  BOOST_ASSERT_MSG(false, "Invalid input data type");
43  return DataType::Float32;
44  }
45 }
46 
47 namespace
48 {
49 
//---------------------------------------------------------------
// Local formatting shim: the Android NDK lacks std::to_string, so stream the
// value through an ostringstream instead.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream formatted;
    formatted << value;
    return formatted.str();
}
59 
60 //---------------------------------------------------------------
61 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
62 {
63  if (!ptr)
64  {
65  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
66  paramName + " parameter must be set.");
67  }
68 }
69 
70 //---------------------------------------------------------------
71 void ValidateTensorShapesMatch(const TensorInfo& first,
72  const TensorInfo& second,
73  std::string const& descName,
74  std::string const& firstName,
75  std::string const& secondName)
76 {
77  if (first.GetShape() != second.GetShape())
78  {
79  throw InvalidArgumentException(descName + ": "
80  + firstName + " & " + secondName + " must have identical shapes");
81  }
82 }
83 
84 //---------------------------------------------------------------
85 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
86 {
87  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
88  {
89  throw InvalidArgumentException(descName +
90  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
91  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
92  }
93 }
94 
95 //---------------------------------------------------------------
96 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
97 {
98  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
99  {
100  throw InvalidArgumentException(descName +
101  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
102  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
103  }
104 }
105 
106 //---------------------------------------------------------------
107 void ValidateTensorNumDimensions(const TensorInfo& tensor,
108  std::string const& descName,
109  unsigned int numDimensions,
110  std::string const& tensorName)
111 {
112  if (tensor.GetNumDimensions() != numDimensions)
113  {
114  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
115  to_string(tensor.GetNumDimensions()) + " dimensions for " +
116  tensorName + " tensor.");
117  }
118 }
119 
120 //---------------------------------------------------------------
121 void ValidateTensorNumElements(const TensorInfo& tensor,
122  std::string const& descName,
123  unsigned int numElements,
124  std::string const& tensorName)
125 {
126  if (tensor.GetNumElements() != numElements)
127  {
128  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
129  to_string(tensor.GetNumElements()) + " elements for " +
130  tensorName + " tensor.");
131  }
132 }
133 
134 //---------------------------------------------------------------
135 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
136  unsigned int numDimension,
137  unsigned int numElements,
138  std::string const& tensorName)
139 {
140  const std::string functionName{"ValidateTensorNumDimNumElem"};
141  ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
142  ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
143 }
144 
145 //---------------------------------------------------------------
146 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
147  const std::string& descName, std::string const& tensorName)
148 {
149  if (tensor.GetDataType() != dataType)
150  {
151  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
152  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
153  }
154 }
155 
// Checks that the tensor's data type supports per-axis quantization, i.e. it
// is QSymmS8 or the deprecated QuantizedSymm8PerAxis alias; throws otherwise.
// NOTE(review): upstream brackets the QuantizedSymm8PerAxis reference with
// deprecation-warning suppression macros that appear to have been lost from
// this copy - confirm against the original source.
void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
{
    if (tensor.GetDataType() != DataType::QSymmS8 &&
        tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
    {
        throw InvalidArgumentException(descName +
            ": Expected data type which supports per-axis quantization scheme but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
168 
169 //---------------------------------------------------------------
170 void ValidateTensorQuantizationSpace(const TensorInfo& first,
171  const TensorInfo& second,
172  const std::string& descName,
173  std::string const& firstName,
174  std::string const& secondName)
175 {
176  if (!first.IsQuantized() ||
177  !second.IsQuantized())
178  {
179  // Not a quantized type, ignore the validation
180  return;
181  }
182 
183  DataType firstDataType = first.GetDataType();
184  DataType secondDataType = second.GetDataType();
185 
186  if (firstDataType != secondDataType)
187  {
188  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
189  " must be of the same quantized type, " +
190  firstName + " is " + GetDataTypeName(firstDataType) + ", " +
191  secondName + " is " + GetDataTypeName(secondDataType));
192  }
193 
194  if (!first.IsTypeSpaceMatch(second))
195  {
196  throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
197  " must have the same quantization space, " +
198  firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
199  " and scale " + to_string(first.GetQuantizationScale()) + ", " +
200  secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
201  " and scale " + to_string(second.GetQuantizationScale()));
202  }
203 }
204 
205 //---------------------------------------------------------------
206 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
207  const TensorInfo& inputTensorInfo,
208  const TensorInfo& weightsTensorInfo,
209  const std::string& descName)
210 {
211  // Helper lambda function to validate a single bias quantization scale value
212  auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
213  {
214  constexpr float tolerance = 0.000001f;
215  if (std::abs(biasScale - expectedScale) > tolerance)
216  {
217  // Print the float values with extra precision to see very small differences
218  std::stringstream msg;
219  msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
220  " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
221  biasScale;
222  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
223  }
224  };
225 
226  if (biasTensor.GetQuantizationOffset() != 0)
227  {
228  throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
229  to_string(biasTensor.GetQuantizationOffset()));
230  }
231 
232  if (biasTensor.HasMultipleQuantizationScales())
233  {
234  // Validate per-axis quantization scales
235  const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
236  const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();
237 
238  if (weightScales.size() != biasScales.size())
239  {
240  std::stringstream msg;
241  msg << descName << ": Expected matchhing number of per-axis quantization scales, but got different "
242  << "values: weights=" << weightScales.size() << ", biases=" << biasScales.size();
243  throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
244  }
245 
246  for (size_t i = 0ul; i < biasScales.size(); ++i)
247  {
248  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
249  VerifyBiasQuantizationScale(biasScales[i], expectedScale);
250  }
251  }
252  else
253  {
254  // Validate per-tensor quantization scale
255  const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
256  VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
257  }
258 }
259 
260 //---------------------------------------------------------------
261 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
262  unsigned int numExpected,
263  const std::string& descName,
264  const std::string& varName)
265 {
266  if (vec.empty() && numExpected > 0)
267  {
268  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
269  }
270 
271  for (unsigned int i = 0; i < numExpected; ++i)
272  {
273  if (!vec[i])
274  {
275  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
276  }
277  }
278 }
279 
280 //---------------------------------------------------------------
281 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
282  const TensorInfo& second,
283  const TensorInfo& output,
284  std::string const& descName,
285  std::string const& firstName,
286  std::string const& secondName)
287 {
288  // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
289  // broadcasted.
290  if (first.GetNumDimensions() != second.GetNumDimensions())
291  {
292  throw InvalidArgumentException(descName + ": Tensors "
293  + firstName + " & " + secondName
294  + " must have the same number of dimensions in order to be broadcasted");
295  }
296  uint32_t numDims = first.GetNumDimensions();
297  std::vector<uint32_t> outputDims(numDims, 0u);
298  for (uint32_t i = 0; i < numDims; i++)
299  {
300  const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
301  const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
302  if (dimsNotEqual && dimsNotOne)
303  {
304  throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
305  }
306  outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
307  }
308  TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
309  if (broadcastShape != output.GetShape())
310  {
311  throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
312  + firstName + " & " + secondName
313  + " does not match the output shape");
314  }
315 }
316 
317 //---------------------------------------------------------------
318 void ValidateDataTypes(const TensorInfo& info,
319  const std::vector<armnn::DataType>& supportedTypes,
320  std::string const& descName)
321 {
322  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
323  if (iterator == supportedTypes.end())
324  {
325  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
326  }
327 }
328 
329 //---------------------------------------------------------------
330 void ValidateTensorDataTypesMatch(const TensorInfo& first,
331  const TensorInfo& second,
332  std::string const& descName,
333  std::string const& firstName,
334  std::string const& secondName)
335 {
336  if (first.GetDataType() != second.GetDataType())
337  {
338  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
339  " must have identical data types.");
340  }
341 }
342 
343 //---------------------------------------------------------------
344 void ValidateTensorNumElementsMatch(const TensorInfo& first,
345  const TensorInfo& second,
346  std::string const& descName,
347  std::string const& firstName,
348  std::string const& secondName)
349 {
350  if (first.GetNumElements() != second.GetNumElements())
351  {
352  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
353  " must have the same number of elements.");
354  }
355 }
356 
// Checks that the weight tensor's data type is compatible with the input's:
// a quantized 8-bit input accepts any of the listed 8-bit quantized weight
// types, otherwise the weight type must match the input type exactly.
void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        // NOTE(review): upstream wraps this list in deprecation-warning
        // suppression macros for QuantizedSymm8PerAxis; they appear to be
        // missing from this copy - confirm against the original source.
        const std::vector<DataType> validTypes =
        {
            DataType::QAsymmU8,
            DataType::QAsymmS8,
            DataType::QSymmS8,
            DataType::QuantizedSymm8PerAxis // deprecated
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}
381 
382 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
383  const std::string& descName,
384  const std::string& tensorName)
385 {
386  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
387  if (!quantizationDim.has_value())
388  {
389  throw InvalidArgumentException(boost::str(
390  boost::format("%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
391  % descName % tensorName));
392  }
393 
394  if (quantizationDim.value() != 0)
395  {
396  throw InvalidArgumentException(boost::str(
397  boost::format("%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, "
398  "but got: %3%") % descName % tensorName % quantizationDim.value()));
399  }
400 }
401 
402 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
403  const std::string& descName,
404  const std::string& tensorName)
405 {
406  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
407  if (quantizationOffset != 0)
408  {
409  throw InvalidArgumentException(boost::str(
410  boost::format("%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, "
411  "but got: %3%") % descName % tensorName % quantizationOffset));
412  }
413 }
414 
// Validates per-axis quantization settings on a weight tensor and, when
// present, its bias: per-axis weights are only permitted when input and
// output share a quantized 8-bit data type; the weight must use a supported
// per-axis data type, quantization dimension 0 and zero offset; a per-axis
// weight additionally requires a per-axis Signed32 bias with the same
// dimension/offset constraints. No-op when the weight tensor does not use
// per-axis quantization.
void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        // Per-axis quantization requires a quantized 8-bit input whose type
        // the output shares.
        const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(boost::str(
                boost::format("%1%: Per-axis quantization parameters set on tensor %2%, "
                              "but data type does not support per-axis quantization.") % descName % "weight"));
        }


        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(boost::str(
                    boost::format("%1%: Per-axis quantization parameters not set on bias tensor, despite being set on "
                                  "weight tensor.") % descName));
            }

            // The bias must be Signed32 and satisfy the same per-axis
            // constraints as the weights.
            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
456 
457 } // anonymous namespace
458 
// Validates that this descriptor carries the expected number of non-null
// input and output tensor handles; delegates to the file-local
// ValidateTensors helper for both arrays.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
465 
466 //---------------------------------------------------------------
467 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
468 {
469  const std::string descriptorName{"MemCopyQueueDescriptor"};
470 
471  ValidateNumInputs(workloadInfo, descriptorName, 1);
472  ValidateNumOutputs(workloadInfo, descriptorName , 1);
473 
474  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
475  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
476 
477  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
478  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
479 
480  if (m_Inputs.size() != m_Outputs.size())
481  {
482  throw InvalidArgumentException(boost::str(
483  boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
484  descriptorName % m_Inputs.size() % m_Outputs.size()));
485  }
486 
487  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
488  {
489  if (!m_Inputs[i])
490  {
491  throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
492  descriptorName % i));
493  }
494 
495  if (!m_Outputs[i])
496  {
497  throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
498  descriptorName % i));
499  }
500  }
501 }
502 
503 //---------------------------------------------------------------
504 void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
505 {
506  ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
507  ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);
508 
509  if (workloadInfo.m_InputTensorInfos.size() != 1)
510  {
511  throw InvalidArgumentException(boost::str(
512  boost::format("Number of input infos (%1%) is not 1.")
513  % workloadInfo.m_InputTensorInfos.size()));
514 
515  }
516 
517  if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
518  {
519  throw InvalidArgumentException(boost::str(
520  boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
521  % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
522  }
523 
524  for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
525  {
526  if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
527  workloadInfo.m_OutputTensorInfos[i].GetNumElements())
528  {
529  throw InvalidArgumentException(boost::str(
530  boost::format("Number of elements for tensor input and output %1% does not match")
531  % i ));
532  }
533  }
534 
535  if (m_Inputs.size() != 1)
536  {
537  throw InvalidArgumentException(boost::str(
538  boost::format("Number of inputs (%1%) is not 1.")
539  % m_Inputs.size()));
540  }
541 
542  if (m_Inputs.size() != m_Outputs.size())
543  {
544  throw InvalidArgumentException(boost::str(
545  boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
546  % m_Inputs.size() % m_Outputs.size()));
547  }
548 
549  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
550  {
551  if (!m_Inputs[i])
552  {
553  throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
554  }
555 
556  if (!m_Outputs[i])
557  {
558  throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
559  }
560  }
561 }
562 
563 //---------------------------------------------------------------
564 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
565 {
566  ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
567  ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
568 
569  if (m_Inputs.size() != 1)
570  {
571  throw InvalidArgumentException(boost::str(
572  boost::format("Number of inputs (%1%) is not 1.")
573  % m_Inputs.size()));
574  }
575 
576  if (m_Outputs.size() != 0)
577  {
578  throw InvalidArgumentException(boost::str(
579  boost::format("Number of outputs (%1%) is not 0.")
580  % m_Inputs.size() % m_Outputs.size()));
581  }
582 
583  if (!m_Inputs[0])
584  {
585  throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
586  }
587 }
588 
//---------------------------------------------------------------
// Validates an Activation workload: one input and one output that share both
// data type and shape, with the input type drawn from the supported list.
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): this supported-type list is empty, so ValidateDataTypes
    // below will reject every input; the enumerators appear to have been lost
    // from this copy - restore them from the original source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
613 
// Validates an ArgMinMax workload: one input, one Signed32 output whose shape
// equals the input shape with the reduction axis removed.
void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The produced indices must be 32-bit signed integers.
    if (outputTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32.");
    }

    // NOTE(review): this supported-type list is empty, so ValidateDataTypes
    // below will reject every input; the enumerators appear to have been lost
    // from this copy - restore them from the original source.
    std::vector<DataType> supportedInputTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape  = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    // Map m_Axis to an unsigned axis index (see armnnUtils::GetUnsignedAxis).
    auto inputNumDimensions = inputShape.GetNumDimensions();
    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        // NOTE(review): with '&&', any 1-D output passes (the first clause is
        // false) regardless of its length - confirm whether '||' was intended.
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        // Dimensions before the reduction axis must be unchanged...
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        // ...and dimensions after the axis shift down by one position.
        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
675 
// Validates a Softmax workload: one input and one output that share both data
// type and shape, with the input type drawn from the supported list.
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): this supported-type list is empty, so ValidateDataTypes
    // below will reject every input; the enumerators appear to have been lost
    // from this copy - restore them from the original source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
699 
700 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
701 {
702  const std::string descriptorName{"SplitterQueueDescriptor"};
703 
704  ValidateNumInputs(workloadInfo, descriptorName, 1);
705 
706  // Check the supported data types
707  std::vector<DataType> supportedTypes =
708  {
715  };
716 
717  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
718  for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
719  {
720  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
721  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
722 
723  const std::string outputName = "output_" + std::to_string(i);
724  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
725  }
726 
727  if (workloadInfo.m_OutputTensorInfos.size() <= 0)
728  {
729  throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
730  }
731 
732  if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
733  {
735  descriptorName + ": Number of split windows "
736  "has to match number of workloadInfo.m_OutputTensorInfos. "
737  "Number of windows: " +
738  to_string(m_ViewOrigins.size()) +
739  ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
740  }
741 
742  //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
743  std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
744  for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
745  {
746  //Checks that the dimensionality of input is same as the split windows.
747  ViewOrigin const& e = m_ViewOrigins[w];
748  if (e.m_Origin.size() != inputDims)
749  {
750  throw InvalidArgumentException(descriptorName + ": Window origin have to "
751  "have the same dimensionality as the input tensor. "
752  "Window origin (index: " +
753  to_string(w) + ") has " + to_string(e.m_Origin.size()) +
754  " dimensions, the input "
755  "tensor has " +
756  to_string(inputDims) + " dimensions.");
757  }
758  for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
759  {
760  if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
761  workloadInfo.m_InputTensorInfos[0].GetShape()[i])
762  {
763  throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
764  "be smaller or equal than the size of the input in that coord.");
765  }
766  }
767  }
768 }
769 
770 void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
771 {
772  const std::string descriptorName{"ConcatQueueDescriptor"};
773 
774  ValidateNumOutputs(workloadInfo, descriptorName, 1);
775 
776  if (m_Inputs.size() <= 0)
777  {
778  throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
779  }
780  if (m_Outputs.size() <= 0)
781  {
782  throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
783  }
784 
785  if (workloadInfo.m_InputTensorInfos.size() <= 0)
786  {
787  throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
788  }
789  if (workloadInfo.m_OutputTensorInfos.size() <= 0)
790  {
791  throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
792  }
793 
794  if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
795  {
796  throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
797  }
798 
799  if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
800  {
801  return;
802  }
803 
804  if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
805  {
807  descriptorName + ": Number of split windows "
808  "has to match number of workloadInfo.m_InputTensorInfos. "
809  "Number of windows: " +
810  to_string(m_ViewOrigins.size()) +
811  ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
812  }
813 
814  //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
815  std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
816  for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
817  {
818  //Checks that the dimensionality of output is same as the split windows.
819  ViewOrigin const& e = m_ViewOrigins[w];
820  if (e.m_Origin.size() != outputDims)
821  {
822  throw InvalidArgumentException(descriptorName + ": Window origin have to "
823  "have the same dimensionality as the output tensor. "
824  "Window origin (index: " +
825  to_string(w) + ") has " + to_string(e.m_Origin.size()) +
826  " dimensions, the output "
827  "tensor has " +
828  to_string(outputDims) + " dimensions.");
829  }
830  //Checks that the merge windows are within the output tensor.
831  for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
832  {
833  if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
834  > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
835  {
836  throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
837  "be smaller or equal than the size of the output in that coord.");
838  }
839  }
840  }
841 
842  // Check the supported data types
843  std::vector<DataType> supportedTypes =
844  {
851  };
852 
853  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
854  for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
855  {
856  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
857  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
858 
859  const std::string inputName = "input_" + std::to_string(i);
860  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
861  }
862 }
863 
// Validates a Stack workload: m_NumInputs identically-shaped inputs (shape
// given by m_InputShape, up to 4-D) stacked along m_Axis into one output
// whose rank is one higher (up to 5-D); all tensors must share one data type.
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();

    // Dimensions before the stack axis are unchanged...
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    // ...the stack axis itself holds one entry per input...
    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    // ...and the remaining dimensions are the input dimensions shifted up by one.
    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    // NOTE(review): this supported-type list is empty, so ValidateDataTypes
    // below will reject every input; the enumerators appear to have been lost
    // from this copy - restore them from the original source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // All inputs must share the first input's data type...
    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    // ...and the output must match it as well.
    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
957 
// Validates a FullyConnected workload: one input (rank 2 or 4), one rank-2
// output, a mandatory rank-2 weight tensor, and an optional rank-1 bias whose
// type/quantization must be consistent with input and weights.
// NOTE(review): original line 958 — presumably
// "void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const"
// (cf. descriptorName below) — was dropped by the HTML-to-text extraction.
959 {
960  const std::string descriptorName{"FullyConnectedQueueDescriptor"};
961 
962  ValidateNumInputs(workloadInfo, descriptorName, 1);
963  ValidateNumOutputs(workloadInfo, descriptorName, 1);
964 
965  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
966  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
967 
968  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
969 
970  if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
971  {
972  throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
973  }
974 
975  ValidatePointer(m_Weight, descriptorName, "weight");
976 
977  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
978  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
979 
980  if (m_Parameters.m_BiasEnabled)
981  {
982  ValidatePointer(m_Bias, descriptorName, "bias");
983 
984  // Validates type and quantization values.
985  const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
986  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
987 
988  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
989  ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
990  }
991 
992  // Check the supported data types
993  std::vector<DataType> supportedTypes =
994  {
// NOTE(review): original lines 995-998 (DataType:: enumerators) dropped by extraction.
999  };
1000 
1001  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1002  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1003 }
1004 
// Validates a Normalization workload: one input, one output, matching data
// types and identical shapes.
// NOTE(review): original line 1005 — presumably
// "void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const"
// — was dropped by the HTML-to-text extraction.
1006 {
1007  const std::string descriptorName{"NormalizationQueueDescriptor"};
1008 
1009  ValidateNumInputs(workloadInfo, descriptorName, 1);
1010  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1011 
1012  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1013  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1014 
1015  // Check the supported data types
1016  std::vector<DataType> supportedTypes =
1017  {
// NOTE(review): original lines 1018-1021 (DataType:: enumerators) dropped by extraction.
1022  };
1023 
1024  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1025 
1026  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1027 
1028  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1029 }
1030 
// Validates an Addition workload: two inputs, one output, all of the same
// (supported) data type, with input shapes broadcastable to the output shape.
1031 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1032 {
1033  const std::string descriptorName{"AdditionQueueDescriptor"};
1034 
1035  ValidateNumInputs(workloadInfo, descriptorName, 2);
1036  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1037 
1038  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1039  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1040  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1041 
1042  std::vector<DataType> supportedTypes =
1043  {
// NOTE(review): original lines 1044-1048 (DataType:: enumerators) dropped by extraction.
1049  };
1050 
1051  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1052  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1053  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1054 
1055  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1056  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1057 
1058  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1059  inputTensorInfo1,
1060  outputTensorInfo,
1061  descriptorName,
1062  "input_0",
1063  "input_1");
1064 }
1065 
1067 {
1068  const std::string descriptorName{"MultiplicationQueueDescriptor"};
1069 
1070  ValidateNumInputs(workloadInfo, descriptorName, 2);
1071  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1072 
1073  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1074  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1075  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1076 
1077  std::vector<DataType> supportedTypes =
1078  {
1084  };
1085 
1086  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1087  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1088  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1089 
1090  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1091  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1092 
1093  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1094  inputTensorInfo1,
1095  outputTensorInfo,
1096  descriptorName,
1097  "input_0",
1098  "input_1");
1099 }
1100 
1102 {
1103  const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
1104 
1105  ValidateNumInputs(workloadInfo, descriptorName, 1);
1106  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1107 
1108  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1109  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1110 
1111  std::vector<DataType> supportedTypes =
1112  {
1117  };
1118 
1119  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1120  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1121 
1122  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1123  ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1124  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1125 
1126  ValidatePointer(m_Mean, descriptorName, "mean");
1127  ValidatePointer(m_Variance, descriptorName, "variance");
1128  ValidatePointer(m_Beta, descriptorName, "beta");
1129  ValidatePointer(m_Gamma, descriptorName, "gamma");
1130 
1131  const TensorInfo& mean = m_Mean->GetTensorInfo();
1132  const TensorInfo& variance = m_Variance->GetTensorInfo();
1133  const TensorInfo& beta = m_Beta->GetTensorInfo();
1134  const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1135 
1136  ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
1137  ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
1138  ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
1139  ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
1140 
1141  ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
1142  ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
1143  ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
1144 }
1145 
1147 {
1148  const std::string descriptorName{"Convolution2dQueueDescriptor"};
1149 
1150  ValidateNumInputs(workloadInfo, descriptorName, 1);
1151  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1152 
1153  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1154  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1155 
1156  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1157  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1158 
1159  ValidatePointer(m_Weight, descriptorName, "weight");
1160 
1161  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1162  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1163 
1164  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1165 
1166  Optional<TensorInfo> optionalBiasTensorInfo;
1167  if (m_Parameters.m_BiasEnabled)
1168  {
1169  ValidatePointer(m_Bias, descriptorName, "bias");
1170 
1171  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1172  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1173 
1174  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1175  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1176  }
1177 
1178  ValidatePerAxisQuantization(inputTensorInfo,
1179  outputTensorInfo,
1180  weightTensorInfo,
1181  optionalBiasTensorInfo,
1182  descriptorName);
1183 
1184  std::vector<DataType> supportedTypes =
1185  {
1192  };
1193 
1194  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1195  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1196 }
1197 
1199 {
1200  const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1201 
1202  ValidateNumInputs(workloadInfo, descriptorName, 1);
1203  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1204 
1205  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1206  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1207 
1208  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1209  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1210 
1211  ValidatePointer(m_Weight, descriptorName, "weight");
1212 
1213  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1214  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1215 
1216  if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1217  {
1219  boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
1220  "cannot be smaller than 1.") % descriptorName %
1221  m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1222  }
1223 
1224  const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1225 
1226  // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1227  // inputChannels * channelMultiplier should be equal to outputChannels.
1228  const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1229  const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
1230  const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1231  if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1232  {
1234  boost::str(boost::format("%1%: output_channels (provided %2%) should be "
1235  "equal to input_channels (provided %3%) multiplied by channel_multiplier "
1236  "(provided %4%).") % descriptorName % numWeightOutputChannels %
1237  numWeightInputChannels % numWeightChannelMultiplier));
1238  }
1239 
1240  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1241 
1242  Optional<TensorInfo> optionalBiasTensorInfo;
1243  if (m_Parameters.m_BiasEnabled)
1244  {
1245  ValidatePointer(m_Bias, descriptorName, "bias");
1246 
1247  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1248  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1249 
1250  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1251  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1252  }
1253  ValidatePerAxisQuantization(inputTensorInfo,
1254  outputTensorInfo,
1255  weightTensorInfo,
1256  optionalBiasTensorInfo,
1257  descriptorName);
1258 
1259  std::vector<DataType> supportedTypes =
1260  {
1266  };
1267 
1268  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1269  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1270 }
1271 
1272 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1273 {
1274  const std::string descriptorName{"PermuteQueueDescriptor"};
1275 
1276  ValidateNumInputs(workloadInfo, descriptorName, 1);
1277  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1278 
1279  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1280 
1281  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1282  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1283 
1284  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1285  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1286 
1287  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1288  {
1289  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1290  {
1291  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1292  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1293  "must match dst dimension " + to_string(mapping[i]) +
1294  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1295  }
1296  }
1297 
1298  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1299 }
1300 
// Validates a Pooling2d workload: one rank-4 input, one rank-4 output, with a
// supported and matching data type.
1301 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1302 {
1303  const std::string descriptorName{"Pooling2dQueueDescriptor"};
1304 
1305  ValidateNumInputs(workloadInfo, descriptorName, 1);
1306  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1307 
1308  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1309  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1310 
1311  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1312  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1313 
1314  std::vector<DataType> supportedTypes =
1315  {
// NOTE(review): original lines 1316-1320 (DataType:: enumerators) dropped by extraction.
1321  };
1322 
1323  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1324  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1325 }
1326 
1328 {
1329  const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1330 
1331  ValidateNumInputs(workloadInfo, descriptorName, 1);
1332  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1333 
1334  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1335  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1336 
1337  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1338  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1339 
1340  std::vector<DataType> supportedTypes =
1341  {
1346  };
1347 
1348  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1349  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1350 
1351  // ResizeBilinear only changes width and height: batch and channel count must match.
1352  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1353  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1354  if (inputBatchSize != outputBatchSize)
1355  {
1357  boost::str(boost::format("%1%: Input batch size (%2%) "
1358  "does not match output batch size (%3%)") %
1359  descriptorName % inputBatchSize % outputBatchSize));
1360  }
1361 
1362  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1363  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1364  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1365  if (inputChannelCount != outputChannelCount)
1366  {
1368  boost::str(boost::format("%1%: Input channel count (%2%) "
1369  "does not match output channel count (%3%)") %
1370  descriptorName % inputChannelCount % outputChannelCount));
1371  }
1372 }
1373 
// Validates a Resize workload: rank-4 input/output of matching type; only
// spatial dimensions may change, so batch and channel counts must agree.
// NOTE(review): the "throw InvalidArgumentException(" lines 1404 and 1415
// opening the two boost::format messages below were dropped by the
// HTML-to-text extraction.
1374 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1375 {
1376  const std::string descriptorName{"ResizeQueueDescriptor"};
1377 
1378  ValidateNumInputs(workloadInfo, descriptorName, 1);
1379  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1380 
1381  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1382  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1383 
1384  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1385  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1386 
1387  std::vector<DataType> supportedTypes =
1388  {
// NOTE(review): original lines 1389-1393 (DataType:: enumerators) dropped by extraction.
1394  };
1395 
1396  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1397  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1398 
1399  // Resize only changes width and height: batch and channel count must match.
1400  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1401  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1402  if (inputBatchSize != outputBatchSize)
1403  {
1405  boost::str(boost::format("%1%: Input batch size (%2%) "
1406  "does not match output batch size (%3%)") %
1407  descriptorName % inputBatchSize % outputBatchSize));
1408  }
1409 
1410  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1411  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1412  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1413  if (inputChannelCount != outputChannelCount)
1414  {
1416  boost::str(boost::format("%1%: Input channel count (%2%) "
1417  "does not match output channel count (%3%)") %
1418  descriptorName % inputChannelCount % outputChannelCount));
1419  }
1420 }
1421 
// Validates a FakeQuantization workload: one rank-2 input and one rank-2
// output with identical shapes, and a non-inverted [min, max] range.
// NOTE(review): original line 1422 — presumably
// "void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const"
// — was dropped by the HTML-to-text extraction.
1423 {
1424  const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1425 
1426  ValidateNumInputs(workloadInfo, descriptorName, 1);
1427  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1428 
1429  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1430  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1431 
1432  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1433  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1434 
1435  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1436 
1437  if (m_Parameters.m_Min > m_Parameters.m_Max)
1438  {
1439  throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1440  }
1441 }
1442 
1444 {
1445  const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};
1446 
1447  ValidateNumInputs(workloadInfo, descriptorName, 1);
1448  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1449 
1450  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1451  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1452 
1453  if (inputTensorInfo.GetNumDimensions() > 4)
1454  {
1455  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1456  }
1457 
1458  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1459 
1460  // Check the supported data types
1461  std::vector<DataType> supportedTypes =
1462  {
1465  };
1466 
1467  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1468  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1469 }
1470 
1472 {
1473  const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1474 
1475  ValidateNumInputs(workloadInfo, descriptorName, 1);
1476  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1477 
1478  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1479  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1480 
1481  if (inputTensorInfo.GetNumDimensions() > 4)
1482  {
1483  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1484  }
1485 
1486  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1487 
1488  // Check the supported data types
1489  std::vector<DataType> supportedTypes =
1490  {
1495  };
1496 
1497  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1498  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1499 }
1500 
// Validates a LogSoftmax workload: one input and one output with identical
// shapes and a supported, matching data type.
1501 void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1502 {
1503  const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1504 
1505  ValidateNumInputs(workloadInfo, descriptorName, 1);
1506  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1507 
1508  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1509  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1510 
1511  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1512 
1513  std::vector<DataType> supportedTypes =
1514  {
// NOTE(review): original lines 1515-1516 (DataType:: enumerators) dropped by extraction.
1517  };
1518 
1519  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1520  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1521 }
1522 
// Validates a Constant workload: no inputs, one output whose shape matches
// the stored constant tensor (m_LayerOutput), with a supported output type.
1523 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1524 {
1525  const std::string descriptorName{"ConstantQueueDescriptor"};
1526 
1527  ValidateNumInputs(workloadInfo, descriptorName, 0);
1528  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1529 
1530  if (!m_LayerOutput)
1531  {
1532  throw InvalidArgumentException(descriptorName + ": No const input specified.");
1533  }
1534 
1535  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1536  ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1537 
1538  // Check the supported data types
1539  std::vector<DataType> supportedTypes =
1540  {
// NOTE(review): original lines 1541-1547 (DataType:: enumerators) dropped by extraction.
1548  };
1549 
1550  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1551 }
1552 
// Validates a Reshape workload: one input and one output with the same total
// element count (shapes may differ) and a supported, matching data type.
1553 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1554 {
1555  const std::string descriptorName{"ReshapeQueueDescriptor"};
1556 
1557  ValidateNumInputs(workloadInfo, descriptorName, 1);
1558  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1559 
1560  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1561  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1562 
1563  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1564 
1565  // Check the supported data types
1566  std::vector<DataType> supportedTypes =
1567  {
// NOTE(review): original lines 1568-1573 (DataType:: enumerators) dropped by extraction.
1574  };
1575 
1576  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1577  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1578 }
1579 
1581 {
1582  const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1583 
1584  ValidateNumInputs(workloadInfo, descriptorName, 1);
1585  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1586 
1587  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1588  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1589 
1590  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1591  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1592 
1593  if (m_Parameters.m_BlockShape.size() != 2)
1594  {
1595  throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1596  }
1597 
1598  if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1599  {
1600  throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1601  "dimensions as Block Shape.");
1602  }
1603 
1604  const TensorShape& inputShape = inputTensorInfo.GetShape();
1605 
1606  std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1607  std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1608 
1609  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1610 
1611  const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1612  widthPad.first + widthPad.second;
1613  const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1614  heightPad.first + heightPad.second;
1615 
1616  const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1617  inputShape[dimensionIndices.GetChannelsIndex()];
1618  const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1619 
1620  if (numOutputElements != numInputElements)
1621  {
1622  throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1623  to_string(numInputElements) + " after padding but output tensor has " +
1624  to_string(numOutputElements) + " elements.");
1625  }
1626 
1627  if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1628  {
1629  throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1630  "divisible by Block Shape in all spatial dimensions");
1631  }
1632 
1633  std::vector<DataType> supportedTypes =
1634  {
1639  };
1640 
1641  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1642  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1643 }
1644 
1646 {
1647  const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1648 
1649  ValidateNumInputs(workloadInfo, descriptorName, 1);
1650  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1651 
1652  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1653  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1654 
1655  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1656  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1657 
1658  std::vector<DataType> supportedTypes =
1659  {
1664  };
1665 
1666  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1667  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1668 
1669  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1670 
1671  if (m_Parameters.m_BlockSize == 0)
1672  {
1673  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
1674  }
1675 
1676  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1677  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1678  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1679  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1680 
1681  const TensorShape& inputShape = inputTensorInfo.GetShape();
1682  if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1683  {
1684  throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1685  "by block size in all spatial dimensions");
1686  }
1687 
1688  const TensorShape& outputShape = outputTensorInfo.GetShape();
1689  if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1690  {
1691  throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
1692  "must be divisible by the square of block size." );
1693  }
1694 }
1695 
// Validates a Floor workload: one input of a supported type, and an output
// whose entire TensorInfo (shape, type, quantization) equals the input's.
1696 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1697 {
1698  const std::string descriptorName{"FloorQueueDescriptor"};
1699 
1700  ValidateNumInputs(workloadInfo, descriptorName, 1);
1701  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1702 
1703  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1704  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1705 
1706  std::vector<DataType> supportedTypes =
1707  {
// NOTE(review): original lines 1708-1710 (DataType:: enumerators) dropped by extraction.
1711  };
1712 
1713  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1714 
// Full TensorInfo equality (not just shape) is required for Floor.
1715  if (inputTensorInfo != outputTensorInfo)
1716  {
1717  throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
1718  }
1719 }
1720 
1721 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1722 {
1723  // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1724 
1725  const std::string descriptorName{"LstmQueueDescriptor"};
1726 
1727  // check dimensions of all inputs and outputs
1728  if (workloadInfo.m_InputTensorInfos.size() != 3)
1729  {
1730  throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1731  }
1732  if (workloadInfo.m_OutputTensorInfos.size() != 4)
1733  {
1734  throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1735  }
1736 
1737  std::vector<DataType> supportedTypes =
1738  {
1742  };
1743 
1744  // check for supported type of one input and match them with all the other input and output
1745  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1746 
1747  // type matches all other inputs
1748  for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1749  {
1750  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1751  workloadInfo.m_InputTensorInfos[i],
1752  descriptorName,
1753  "input_0",
1754  "input_" + std::to_string(i));
1755  }
1756  // type matches all other outputs
1757  for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1758  {
1759  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1760  workloadInfo.m_OutputTensorInfos[i],
1761  "LstmQueueDescriptor",
1762  "input_0",
1763  "output_" + std::to_string(i));
1764  }
1765 
1766  // Making sure clipping parameters have valid values.
1767  // == 0 means no clipping
1768  // > 0 means clipping
1769  if (m_Parameters.m_ClippingThresCell < 0.0f)
1770  {
1771  throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
1772  }
1773  if (m_Parameters.m_ClippingThresProj < 0.0f)
1774  {
1775  throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
1776  }
1777 
1778 
1779  // Inferring batch size, number of outputs and number of cells from the inputs.
1780  const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1781  const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1782  ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1783  const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1784  ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1785  const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1786 
1787  // input tensor
1788  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1789  descriptorName + " input_0");
1790  // outputStateInTensor
1791  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1792  descriptorName + " input_1");
1793  // outputStateInTensor
1794  ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1795  descriptorName + " input_2");
1796  // scratchBufferTensor
1797  unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1798  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1799  descriptorName + " output_0");
1800  // outputStateOutTensor
1801  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1802  descriptorName + " output_1");
1803  // cellStateOutTensor
1804  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1805  descriptorName + " output_2");
1806  // outputTensor
1807  ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1808  descriptorName + " output_3");
1809 
1810 
1811  // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1812  if ( m_InputToInputWeights )
1813  {
1814  ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1815  (n_cell * n_input), "InputLayerNormWeights");
1816  }
1817 
1818  ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1819  ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1820  (n_cell * n_input), "InputToForgetWeights");
1821 
1822  ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1823  ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1824  (n_cell * n_input), "InputToCellWeights");
1825 
1826  if ( m_RecurrentToInputWeights )
1827  {
1828  ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1829  (n_cell * n_output), "RecurrentToInputWeights");
1830  }
1831 
1832  ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1833  ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1834  (n_cell * n_output), "RecurrentToForgetWeights");
1835 
1836  ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1837  ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1838  (n_cell * n_output), "RecurrentToCellWeights");
1839 
1840  // Make sure the input-gate's parameters are either both present (regular
1841  // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1842  bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1843  !m_Parameters.m_CifgEnabled) ||
1844  (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1845  m_Parameters.m_CifgEnabled));
1846  if (!cifg_weights_all_or_none)
1847  {
1848  throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1849  "RecurrentToInputWeights must either both be present (regular LSTM) "
1850  "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1851  "accordingly.");
1852  }
1853 
1854  if ( m_CellToInputWeights )
1855  {
1856  ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1857  n_cell, "CellToInputWeights");
1858  }
1859  if ( m_CellToForgetWeights )
1860  {
1861  ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1862  n_cell, "CellToForgetWeights");
1863  }
1864  if ( m_CellToOutputWeights )
1865  {
1866  ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1867  n_cell, "CellToOutputWeights");
1868  }
1869 
1870  // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1871  bool peephole_weights_all_or_none =
1872  (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1873  && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1874  || ( !m_CellToInputWeights && !m_CellToForgetWeights
1875  && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1876  if (!peephole_weights_all_or_none)
1877  {
1878  throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1879  }
1880 
1881  // Make sure the input gate bias is present only when not a CIFG-LSTM.
1882  if (m_Parameters.m_CifgEnabled)
1883  {
1884  if (m_InputGateBias)
1885  {
1886  throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1887  }
1888  }
1889  else
1890  {
1891  if (!m_InputGateBias)
1892  {
1893  throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1894  "must be present.");
1895  }
1896  ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1897  n_cell, "InputGateBias");
1898  }
1899 
1900  ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1901  ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1902 
1903  ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1904  ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1905 
1906  ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1907  ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1908 
1909  if (m_ProjectionWeights)
1910  {
1911  ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1912  (n_cell * n_output), "ProjectionWeights");
1913  }
1914  if (m_ProjectionBias)
1915  {
1916  ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1917  }
1918 
1919  // Making sure the projection tensors are consistent:
1920  // 1) If projection weight is not present, then projection bias should not be
1921  // present.
1922  // 2) If projection weight is present, then projection bias is optional.
1923  bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1924  !m_Parameters.m_ProjectionEnabled)
1925  || (m_ProjectionWeights && !m_ProjectionBias &&
1926  m_Parameters.m_ProjectionEnabled)
1927  || (m_ProjectionWeights && m_ProjectionBias &&
1928  m_Parameters.m_ProjectionEnabled));
1929  if (!projecton_tensors_consistent)
1930  {
1931  throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1932  }
1933 
1934  // The four layer normalization weights either all have values or none of them have values. Additionally, if
1935  // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1936  // either all have values or none of them have values. Layer normalization is used when the values of all the
1937  // layer normalization weights are present
1938  if (m_InputLayerNormWeights)
1939  {
1940  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1941  }
1942  if (m_ForgetLayerNormWeights)
1943  {
1944  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1945  }
1946  if (m_CellLayerNormWeights)
1947  {
1948  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1949  }
1950  if (m_OutputLayerNormWeights)
1951  {
1952  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1953  }
1954 
1955  if (m_Parameters.m_LayerNormEnabled)
1956  {
1957  if (!m_Parameters.m_CifgEnabled)
1958  {
1959  if (!m_InputLayerNormWeights)
1960  {
1961  throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
1962  "disabled but InputLayerNormWeights are not present");
1963  }
1964  ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1965  1, n_cell, "InputLayerNormWeights");
1966  }
1967  else if (m_InputLayerNormWeights)
1968  {
1969  throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
1970  "enabled");
1971  }
1972 
1973  ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
1974  "ForgetLayerNormWeights");
1975  ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1976 
1977  ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
1978  "OutputLayerNormWeights");
1979  ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1980 
1981  ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
1982  "CellLayerNormWeights");
1983  ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1984  }
1985  else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1986  {
1987  throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
1988  "normalisation weights are present.");
1989  }
1990 }
1991 
{
    const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};

    // Conversion is 1:1 — exactly one input and one output tensor.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The data types are fixed by the layer's name: Float32 in, Float16 out.
    if (inputTensorInfo.GetDataType() != DataType::Float32)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
    }

    if (outputTensorInfo.GetDataType() != DataType::Float16)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
    }

    // Only the element type changes; the shape must be preserved.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2014 
{
    const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};

    // Conversion is 1:1 — exactly one input and one output tensor.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The data types are fixed by the layer's name: Float16 in, Float32 out.
    if (inputTensorInfo.GetDataType() != DataType::Float16)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
    }

    if (outputTensorInfo.GetDataType() != DataType::Float32)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
    }

    // Only the element type changes; the shape must be preserved.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2037 
void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DivisionQueueDescriptor"};

    // Element-wise binary operation: two inputs, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // All three tensors must use a supported element type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs must broadcast against each other; the output takes the
    // broadcast shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2068 
{
    const std::string descriptorName{"SubtractionQueueDescriptor"};

    // Element-wise binary operation: two inputs, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // All three tensors must use a supported element type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs must broadcast against each other; the output takes the
    // broadcast shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2099 
void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MaximumQueueDescriptor"};

    // Element-wise binary operation: two inputs, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // All three tensors must use a supported element type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs must broadcast against each other; the output takes the
    // broadcast shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2132 
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MeanQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // First check if input tensor data type is supported, then
    // check if this data type matches the output tensor data type
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_KeepDims)
    {
        // Reduced axes are kept as size-1 dimensions, so input and output
        // ranks must match.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        // No axes specified: everything is reduced down to a 1D result.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
    }
    else
    {
        // Each reduced axis removes one dimension, but the output rank never
        // drops below 1.
        unsigned int outputDim =
            inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(outputTensorInfo,
                                    descriptorName,
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
2174 
2175 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2176 {
2177  const std::string descriptorName{"PadQueueDescriptor"};
2178 
2179  ValidateNumInputs(workloadInfo, descriptorName, 1);
2180  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2181 
2182  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2183  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2184 
2185  // input and output should have the same number of dimensions
2186  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2187 
2188  // there should be entry in the pad list for each dimension in the input tensor
2189  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2190  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2191  "as there are dimensions in the input tensor that is " +
2192  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2193  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2194  }
2195 }
2196 
void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QuantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // The output of a Quantize layer must itself be a quantized data type;
    // its exact type is otherwise unconstrained here.
    if (!IsQuantizedType(outputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
    }
}
2224 
{
    const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // Input must use a supported type, and the output must use the same type.
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2246 
{
    const std::string descriptorName{"StridedSliceQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Slicing must not alter the quantization scale/offset of the data.
    ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    const uint32_t rank = inputTensorInfo.GetNumDimensions();
    if (rank > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
    }

    // Begin, End & Stride length must be of rank(input0)
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
    }

    // Stride entries must be non-zero (a zero stride would never advance).
    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
        }
    }
}
2301 
void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MinimumQueueDescriptor"};

    // Element-wise binary operation: two inputs, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // All three tensors must use a supported element type.
    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs must broadcast against each other; the output takes the
    // broadcast shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2333 
2334 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2335 {
2336  const std::string descriptorName{"DebugQueueDescriptor"};
2337 
2338  ValidateNumInputs(workloadInfo, descriptorName, 1);
2339  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2340 }
2341 
2342 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2343 {
2344  const std::string descriptorName{"EqualQueueDescriptor"};
2345 
2346  ValidateNumInputs(workloadInfo, descriptorName, 2);
2347  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2348 
2349  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2350  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2351  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2352 
2353  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2354  inputTensorInfo1,
2355  outputTensorInfo,
2356  descriptorName,
2357  "input_0",
2358  "input_1");
2359 
2360  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2361  {
2362  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2363  }
2364 }
2365 
2366 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2367 {
2368  const std::string descriptorName{"GreaterQueueDescriptor"};
2369 
2370  ValidateNumInputs(workloadInfo, descriptorName, 2);
2371  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2372 
2373  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2374  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2375  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2376 
2377  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2378  inputTensorInfo1,
2379  outputTensorInfo,
2380  descriptorName,
2381  "input_0",
2382  "input_1");
2383 
2384  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2385  {
2386  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2387  }
2388 }
2389 
void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"RsqrtQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Element-wise unary op: output shape must equal input shape.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    // Input must use a supported type, and the output must use the same type.
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2413 
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GatherQueueDescriptor"};

    // Gather takes the data tensor (input 0) and an indices tensor (input 1).
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // Indices are always 32-bit signed integers.
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    if (indicesTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
    }

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Gather replaces the first input dimension with the full shape of the
    // indices tensor: rank(out) = rank(in) + rank(indices) - 1.
    unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
2445 
{
    // NOTE(review): 'const std::string&' binds a reference to a temporary here
    // (lifetime-extended, so legal) — other Validate() methods use a plain
    // value; consider making this consistent.
    const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};

    // Inputs: box encodings and class scores.
    ValidateNumInputs(workloadInfo, descriptorName, 2);

    // Outputs are checked manually so a precise count can be reported.
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
            to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    // Anchors arrive as a constant tensor handle on the descriptor itself.
    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo       = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();

    const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];

    ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");

    const std::vector<DataType> supportedInputTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);

    ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");

    // NOTE: Output is always Float32 regardless of input type
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");

    // IoU threshold must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
            "must be positive and less than or equal to 1.");
    }

    // Scores carry one extra class column for the background class.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException(descriptorName + ": Number of classes with background "
            "should be equal to number of classes + 1.");
    }
}
2511 
void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): 'const std::string&' binds a reference to a temporary here
    // (lifetime-extended, so legal) — other Validate() methods use a plain value.
    const std::string& descriptorName{"DequantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The input to a Dequantize layer must itself be a quantized type.
    if (!IsQuantizedType(inputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
    }

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
2535 
2536 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2537 {
2538  const std::string& descriptorName{"MergeQueueDescriptor"};
2539 
2540  ValidateNumInputs(workloadInfo, descriptorName, 2);
2541  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2542 
2543  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2544  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2545  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2546 
2547  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2548  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2549 
2550  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2551  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2552 }
2553 
void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): 'const std::string&' binds a reference to a temporary here
    // (lifetime-extended, so legal) — other Validate() methods use a plain value.
    const std::string& descriptorName{"SwitchQueueDescriptor"};

    // Switch routes one data input (plus a condition) to one of two outputs.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];

    const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);

    // Both outputs carry the (unmodified) shape of the first input.
    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo0,
                              descriptorName,
                              "input_0",
                              "output_0");

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo1,
                              descriptorName,
                              "input_0",
                              "output_1");
}
2592 
void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
{
    // Pre-compiled workloads are generated internally (not from user input),
    // so their configuration is trusted and no validation is performed.
}
2597 
void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // NOTE(review): 'const std::string&' binds a reference to a temporary here
    // (lifetime-extended, so legal) — other Validate() methods use a plain value.
    const std::string& descriptorName{"PreluQueueDescriptor"};

    // PReLU takes the data tensor (input 0) and the alpha/slope tensor (input 1).
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes
    {
        // NOTE(review): the DataType entries are missing from this extraction
        // of the source; restore from the original file.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // All three tensors must share one data type.
    // NOTE(review): "ouptut" below is a typo in the runtime error label
    // ("output"); fix upstream — cannot be changed in a comment-only edit.
    ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");

    // Alpha must broadcast against the input; the output takes the
    // broadcast shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo,
                                       alphaTensorInfo,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input",
                                       "alpha");
}
2632 
// Validates a TransposeConvolution2d workload: one 4-D input, one 4-D output,
// a mandatory 4-D weight tensor, and an optional bias whose type/quantization
// must be consistent with the input and weights.
// NOTE(review): the function signature line (original line 2633,
// TransposeConvolution2dQueueDescriptor::Validate per the descriptor name
// below) was lost in this extraction.
2634 {
2635  const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2636 
2637  ValidateNumInputs(workloadInfo, descriptorName, 1);
2638  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2639 
2640  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2641  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2642 
2643  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2644  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2645 
// The weight tensor handle must be set before its info can be inspected.
2646  ValidatePointer(m_Weight, descriptorName, "weight");
2647 
2648  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2649  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2650 
2651  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2652 
// Bias is only present (and therefore only validated) when enabled in the
// descriptor parameters.
2653  Optional<TensorInfo> optionalBiasTensorInfo;
2654  if (m_Parameters.m_BiasEnabled)
2655  {
2656  ValidatePointer(m_Bias, descriptorName, "bias");
2657 
2658  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2659  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
2660 
// Bias data type is derived from the input type (see GetBiasDataType above).
2661  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
2662  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2663  }
2664 
2665  ValidatePerAxisQuantization(inputTensorInfo,
2666  outputTensorInfo,
2667  weightTensorInfo,
2668  optionalBiasTensorInfo,
2669  descriptorName);
2670 
// NOTE(review): the DataType entries of this list (original lines 2673-2676)
// were lost in this extraction; restore from the upstream file.
2671  std::vector<DataType> supportedTypes =
2672  {
2677  };
2678 
2679  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2680  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2681 }
2682 
2684 {
2685  const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
2686 
2687  // Validate number of inputs/outputs
2688  ValidateNumInputs(workloadInfo, descriptorName, 3);
2689  ValidateNumOutputs(workloadInfo, descriptorName, 2);
2690 
2691  // Input/output tensor infos
2692  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2693  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
2694  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
2695 
2696  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2697  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2698 
2699  std::vector<DataType> inputOutputSupportedTypes =
2700  {
2702  };
2703 
2704  std::vector<DataType> cellStateSupportedTypes =
2705  {
2707  };
2708 
2709  std::vector<DataType> weightsSupportedTypes =
2710  {
2712  };
2713 
2714  std::vector<DataType> biasSupportedTypes =
2715  {
2717  };
2718 
2719  // Validate types of input/output tensors
2720  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2721  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2722  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2723 
2724  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2725  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2726 
2727  // Validate matching types of input/output tensors
2728  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2729  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2730  "outputStateIn", "outputStateOut");
2731  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2732 
2733  // Validate matching quantization info for input/output tensors
2734  ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2735  ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
2736  ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2737 
2738  // Infer number of batches, input size and output size from tensor dimensions
2739  const uint32_t numBatches = inputInfo.GetShape()[0];
2740  const uint32_t inputSize = inputInfo.GetShape()[1];
2741  const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2742 
2743  // Validate number of dimensions and number of elements for input/output tensors
2744  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2745  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
2746  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2747  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
2748  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2749 
2750  // Validate number of dimensions and number of elements for weights tensors
2751  ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
2752  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2753  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
2754 
2755  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2756  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2757  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
2758 
2759  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2760  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2761  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
2762 
2763  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2764  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2765  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
2766 
2767  ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
2768  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2769  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
2770 
2771  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2772  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2773  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2774  " RecurrentToForgetWeights");
2775 
2776  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2777  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2778  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2779 
2780  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2781  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2782  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2783 
2784  // Validate data types for weights tensors (all should match each other)
2785  ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2786 
2787  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2788  "inputToInputWeights", "inputToForgetWeights");
2789  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2790  "inputToInputWeights", "inputToCellWeights");
2791  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2792  "inputToInputWeights", "inputToOutputWeights");
2793 
2794  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2795  "inputToInputWeights", "recurrentToInputWeights");
2796  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2797  "inputToInputWeights", "recurrentToForgeteights");
2798  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2799  "inputToInputWeights", "recurrentToCellWeights");
2800  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2801  "inputToInputWeights", "recurrentToOutputWeights");
2802 
2803  // Validate matching quantization info for weight tensors (all should match each other)
2804  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2805  descriptorName, "inputToInputWeights", "inputToForgetWeights");
2806  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2807  descriptorName, "inputToInputWeights", "inputToCellWeights");
2808  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2809  descriptorName, "inputToInputWeights", "inputToOutputWeights");
2810 
2811  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2812  descriptorName, "inputToInputWeights", "recurrentToInputWeights");
2813  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2814  descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
2815  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2816  descriptorName, "inputToInputWeights", "recurrentToCellWeights");
2817  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2818  descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
2819 
2820  // Validate number of dimensions and number of elements in bias tensors
2821  ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
2822  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2823  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
2824 
2825  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
2826  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2827  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
2828 
2829  ValidatePointer(m_CellBias, descriptorName, "CellBias");
2830  auto cellBiasInfo = m_CellBias->GetTensorInfo();
2831  ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
2832 
2833  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
2834  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2835  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
2836 
2837  // Validate data types for bias tensors (all should match each other)
2838  ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2839 
2840  ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2841  "inputGateBias", "forgetGateBias");
2842  ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2843  "inputGateBias", "cellBias");
2844  ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2845  "inputGateBias", "outputGateBias");
2846 
2847  // Validate bias tensor quantization info
2848  ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2849  ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2850  ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2851  ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2852 }
2853 
// Validates an Abs workload: one input and one output with identical shape and
// identical (supported) data type.
2854 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2855 {
2856  const std::string descriptorName{"AbsQueueDescriptor"};
2857 
2858  ValidateNumInputs(workloadInfo, descriptorName, 1);
2859  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2860 
2861  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2862  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2863 
2864  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2865 
// NOTE(review): the DataType entries of this list (original lines 2868-2871)
// were lost in this extraction; restore from the upstream file.
2866  std::vector<DataType> supportedTypes =
2867  {
2872  };
2873 
2874  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2875  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2876 }
2877 
2878 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2879 {
2880  const std::string descriptorName{"SliceQueueDescriptor"};
2881 
2882  ValidateNumInputs(workloadInfo, descriptorName, 1);
2883  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2884 
2885  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2886  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2887 
2888  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2889 
2890  const unsigned int rank = inputTensorInfo.GetNumDimensions();
2891  if (rank > 4)
2892  {
2893  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
2894  }
2895 
2896  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
2897 
2898  // Check if m_Begin and m_Size have the expected length
2899  if (m_Parameters.m_Begin.size() != rank)
2900  {
2901  throw InvalidArgumentException(descriptorName +
2902  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
2903  }
2904  if (m_Parameters.m_Size.size() != rank)
2905  {
2906  throw InvalidArgumentException(descriptorName +
2907  ": Length of size descriptor must equal rank " + std::to_string(rank));
2908  }
2909 
2910  // Check if the shape of the output tensor matches m_Size
2911  const TensorShape& outputShape = outputTensorInfo.GetShape();
2912  for (unsigned int i = 0u; i < rank; ++i)
2913  {
2914  if (m_Parameters.m_Size[i] != outputShape[i])
2915  {
2916  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
2917  }
2918  }
2919 
2920  // Check if the sum of begin offset and size in a given dimension
2921  // does not exceed the size of corresponding input
2922  const TensorShape& inputShape = inputTensorInfo.GetShape();
2923  for(unsigned int i = 0u; i < rank; ++i)
2924  {
2925  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
2926  {
2927  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
2928  std::to_string(i) + " exceeds input size.");
2929  }
2930  }
2931 }
2932 
2934 {
2935  const std::string descriptorName{"DepthToSpaceQueueDescriptor"};
2936 
2937  ValidateNumInputs(workloadInfo, descriptorName, 1);
2938  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2939 
2940  const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
2941  const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];
2942 
2943  ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
2944  ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");
2945 
2946  std::vector<DataType> supportedTypes =
2947  {
2952  };
2953 
2954  ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
2955  ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
2956 
2957  ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");
2958 
2959  if (m_Parameters.m_BlockSize == 0)
2960  {
2961  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
2962  }
2963 
2964  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
2965  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
2966  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
2967  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
2968 
2969  const TensorShape& outputShape = outputInfo.GetShape();
2970  if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
2971  {
2972  throw InvalidArgumentException(descriptorName + ": Output width and height shape"
2973  "must be divisible by block size.");
2974  }
2975 
2976  const TensorShape& inputShape = inputInfo.GetShape();
2977  if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
2978  {
2979  throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
2980  "must be divisible by the square of block size." );
2981  }
2982 }
2983 
2984 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2985 {
2986  const std::string descriptorName{"ComparisonQueueDescriptor"};
2987 
2988  ValidateNumInputs(workloadInfo, descriptorName, 2);
2989  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2990 
2991  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2992  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2993  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2994 
2995  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2996  inputTensorInfo1,
2997  outputTensorInfo,
2998  descriptorName,
2999  "input_0",
3000  "input_1");
3001 
3002  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3003  {
3004  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3005  }
3006 }
3007 
// Validates an ElementwiseUnary workload: one input and one output with
// identical shape and identical (supported) data type.
// NOTE(review): the function signature line (original line 3008,
// ElementwiseUnaryQueueDescriptor::Validate per the descriptor name below)
// was lost in this extraction.
3009 {
3010  const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};
3011 
3012  ValidateNumInputs(workloadInfo, descriptorName, 1);
3013  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3014 
3015  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3016  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3017 
3018  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3019 
// NOTE(review): the DataType entries of this list (original lines 3022-3025)
// were lost in this extraction; restore from the upstream file.
3020  std::vector<DataType> supportedTypes =
3021  {
3026  };
3027 
3028  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3029  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3030 }
3031 
3032 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:232
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:264
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsQuantized() const
Definition: Tensor.cpp:290
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:232
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:280
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:218
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:98
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:237
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetChannelsIndex() const
SizeType GetSize() const
Definition: Types.hpp:199
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:32
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:93
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetDataType() const
Definition: Tensor.hpp:95
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin
void Validate(const WorkloadInfo &workloadInfo) const
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void Validate(const WorkloadInfo &workloadInfo) const
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:165
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
float GetQuantizationScale() const
Definition: Tensor.cpp:247
void Validate(const WorkloadInfo &workloadInfo) const
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:237
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::vector< unsigned int > m_Origin