ArmNN
 21.11
WorkloadData.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
11 #include <armnn/Logging.hpp>
12 
13 #include <algorithm>
14 #include <iomanip>
15 #include <string>
16 #include <sstream>
17 
18 #include <fmt/format.h>
19 
20 using namespace armnnUtils;
21 
22 namespace armnn
23 {
24 
25 //---------------------------------------------------------------
27 {
28  switch (inputDataType)
29  {
30  case DataType::Float16:
31  return DataType::Float16;
32  case DataType::BFloat16:
33  case DataType::Float32:
34  return DataType::Float32;
35  case DataType::QAsymmS8:
36  return DataType::Signed32;
37  case DataType::QAsymmU8:
38  return DataType::Signed32;
39  case DataType::QSymmS8:
40  return DataType::Signed32;
41  case DataType::QSymmS16:
42  return DataType::Signed32;
43  default:
44  ARMNN_ASSERT_MSG(false, "Invalid input data type");
45  return DataType::Float32;
46  }
47 }
48 
49 namespace
50 {
51 
52 //---------------------------------------------------------------
53 //android ndk does not support std::to_string function.
// Portable replacement for std::to_string (not available in the Android NDK):
// formats any streamable value via an ostringstream.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
61 
62 //---------------------------------------------------------------
63 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
64 {
65  if (!ptr)
66  {
67  throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
68  paramName + " parameter must be set.");
69  }
70 }
71 
72 //---------------------------------------------------------------
73 void ValidateTensorShapesMatch(const TensorInfo& first,
74  const TensorInfo& second,
75  std::string const& descName,
76  std::string const& firstName,
77  std::string const& secondName)
78 {
79  if (first.GetShape() != second.GetShape())
80  {
81  throw InvalidArgumentException(descName + ": "
82  + firstName + " & " + secondName + " must have identical shapes");
83  }
84 }
85 
86 //---------------------------------------------------------------
87 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
88 {
89  if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
90  {
91  throw InvalidArgumentException(descName +
92  ": Requires exactly " + to_string(expectedSize) + "input(s). " +
93  to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
94  }
95 }
96 
97 //---------------------------------------------------------------
98 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
99 {
100  if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
101  {
102  throw InvalidArgumentException(descName +
103  ": Requires exactly " + to_string(expectedSize) + " output(s). " +
104  to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
105  }
106 }
107 
108 //---------------------------------------------------------------
109 void ValidateTensorNumDimensions(const TensorInfo& tensor,
110  std::string const& descName,
111  unsigned int numDimensions,
112  std::string const& tensorName)
113 {
114  if (tensor.GetNumDimensions() != numDimensions)
115  {
116  throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
117  to_string(tensor.GetNumDimensions()) + " dimensions for " +
118  tensorName + " tensor.");
119  }
120 }
121 
122 //---------------------------------------------------------------
123 void ValidateTensorNumElements(const TensorInfo& tensor,
124  std::string const& descName,
125  unsigned int numElements,
126  std::string const& tensorName)
127 {
128  if (tensor.GetNumElements() != numElements)
129  {
130  throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
131  to_string(tensor.GetNumElements()) + " elements for " +
132  tensorName + " tensor.");
133  }
134 }
135 
136 //---------------------------------------------------------------
137 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
138  unsigned int numDimension,
139  unsigned int numElements,
140  std::string const& tensorName)
141 {
142  const std::string functionName{"ValidateTensorNumDimNumElem"};
143  ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
144  ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
145 }
146 
147 //---------------------------------------------------------------
148 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
149  const std::string& descName, std::string const& tensorName)
150 {
151  if (tensor.GetDataType() != dataType)
152  {
153  throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
154  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
155  }
156 }
157 
158 void ValidPerAxisQuantizedDataType(const TensorInfo& tensor, const std::string& descName, const std::string& tensorName)
159 {
160  if (tensor.GetDataType() != DataType::QSymmS8)
161  {
162  throw InvalidArgumentException(descName +
163  ": Expected data type which supports per-axis quantization scheme but got " +
164  GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
165  }
166 }
167 
168 //---------------------------------------------------------------
// Checks that two quantized tensors live in the same quantization "space":
// the same quantized data type and (via IsTypeSpaceMatch) the same scale and
// offset. Tensors that are not quantized are skipped entirely.
void ValidateTensorQuantizationSpace(const TensorInfo& first,
                                     const TensorInfo& second,
                                     const std::string& descName,
                                     std::string const& firstName,
                                     std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    // Data types are compared first so the more specific type-mismatch message
    // takes precedence over the scale/offset mismatch message below.
    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
                                       " and scale " + to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
                                       " and scale " + to_string(second.GetQuantizationScale()))
                                       ;
    }
}
203 
204 //---------------------------------------------------------------
// Validates a bias tensor's quantization parameters against the input and
// weight tensors it accompanies. The bias offset must be zero (throws
// otherwise). Each bias scale is expected to equal input_scale * weight_scale;
// a mismatch there only logs a warning, because the provided scale is still
// used. Handles both per-tensor and per-axis quantized weights/bias.
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    // Helper lambda function to validate a single bias quantization scale value
    auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
    {
        constexpr float tolerance = 0.0001f;
        if (std::abs(biasScale - expectedScale) > tolerance)
        {
            // Print the float values with extra precision to see very small differences
            ARMNN_LOG(warning) << std::setprecision(6) << descName << ": Expected " << expectedScale <<
                " for bias quantization scale (product of input and weight scales), but got " <<
                biasScale << ". Using scale provided.";
        }
    };

    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
                                       to_string(biasTensor.GetQuantizationOffset()));
    }

    // Per-axis path: triggered when either the bias or the weights carry
    // multiple quantization scales.
    if (biasTensor.HasMultipleQuantizationScales() || weightsTensorInfo.HasMultipleQuantizationScales())
    {
        // Validate per-axis quantization scales
        const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
        const std::vector<float>& biasScales = biasTensor.GetQuantizationScales();

        // Bias scales must pair up one-to-one with the weight scales.
        if (weightScales.size() != biasScales.size())
        {
            std::stringstream msg;
            msg << descName << ": Expected matching number of per-axis quantization scales for weights and bias, "
                << "but got different values. This is currently unsupported: weights=" << weightScales.size()
                << ", biases=" << biasScales.size();
            throw InvalidArgumentException(msg.str(), CHECK_LOCATION());
        }

        for (size_t i = 0ul; i < biasScales.size(); ++i)
        {
            const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
            VerifyBiasQuantizationScale(biasScales[i], expectedScale);
        }
    }
    else
    {
        // Validate per-tensor quantization scale
        const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
        VerifyBiasQuantizationScale(biasTensor.GetQuantizationScale(), expectedScale);
    }
}
257 
258 //---------------------------------------------------------------
259 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
260  unsigned int numExpected,
261  const std::string& descName,
262  const std::string& varName)
263 {
264  if (vec.empty() && numExpected > 0)
265  {
266  throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
267  }
268 
269  for (unsigned int i = 0; i < numExpected; ++i)
270  {
271  if (!vec[i])
272  {
273  throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
274  }
275  }
276 }
277 
278 //---------------------------------------------------------------
// Checks element-wise broadcasting compatibility: both inputs must have the
// same rank, every dimension pair must be equal or contain a 1, and the output
// shape must equal the per-dimension maximum of the two input shapes.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
                                       + firstName + " & " + secondName
                                       + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // A dimension pair is broadcastable when the sizes are equal, or when
        // at least one of them is 1 (that side is stretched).
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast result of each dimension is the larger of the pair.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(armnn::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
314 
315 //---------------------------------------------------------------
316 void ValidateDataTypes(const TensorInfo& info,
317  const std::vector<armnn::DataType>& supportedTypes,
318  std::string const& descName)
319 {
320  auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
321  if (iterator == supportedTypes.end())
322  {
323  throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
324  }
325 }
326 
327 //---------------------------------------------------------------
328 void ValidateTensorDataTypesMatch(const TensorInfo& first,
329  const TensorInfo& second,
330  std::string const& descName,
331  std::string const& firstName,
332  std::string const& secondName)
333 {
334  if (first.GetDataType() != second.GetDataType())
335  {
336  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
337  " must have identical data types.");
338  }
339 }
340 
341 //---------------------------------------------------------------
342 void ValidateTensorNumElementsMatch(const TensorInfo& first,
343  const TensorInfo& second,
344  std::string const& descName,
345  std::string const& firstName,
346  std::string const& secondName)
347 {
348  if (first.GetNumElements() != second.GetNumElements())
349  {
350  throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
351  " must have the same number of elements.");
352  }
353 }
354 
355 void ValidateWeightDataType(const TensorInfo& inputInfo,
356  const TensorInfo& weightInfo,
357  const std::string& descName)
358 {
359  const DataType inputType = inputInfo.GetDataType();
360  if (IsQuantized8BitType(inputType))
361  {
362  const std::vector<DataType> validTypes =
363  {
364  DataType::QAsymmS8,
365  DataType::QAsymmU8,
366  DataType::QSymmS8
367  };
368 
369  ValidateDataTypes(weightInfo, validTypes, descName);
370  }
371  else
372  {
373  ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
374  }
375 }
376 
377 void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
378  const std::string& descName,
379  const std::string& tensorName)
380 {
381  const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
382  if (!quantizationDim.has_value())
383  {
384  throw InvalidArgumentException(fmt::format("{0}: Quantization dimension for per-axis quantization "
385  "not set on tensor {1}.", descName, tensorName));
386  }
387 }
388 
389 void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
390  const std::string& descName,
391  const std::string& tensorName)
392 {
393  int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
394  if (quantizationOffset != 0)
395  {
396  throw InvalidArgumentException(fmt::format(
397  "{0}: Quantization offset for per-axis quantization expected to be 0 on tensor {1}, but got: {2}",
398  descName, tensorName, quantizationOffset));
399  }
400 }
401 
// If the weight tensor uses per-axis quantization, validates the full set of
// per-axis constraints: input/output must be matching quantized 8-bit types,
// the weights must be QSymmS8 with a quantization axis set and a zero offset,
// and a bias (if given) must be Signed32, per-axis quantized, with an axis set
// and a zero offset. A no-op for per-tensor quantized weights.
void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        // Per-axis quantization is only supported when the input is a quantized
        // 8-bit type and the output uses the same type.
        const bool canHavePerAxisQuantization = (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Per-axis quantization parameters set on tensor {1}, but data type does not support "
                "per-axis quantization.", descName, "weight"));
        }


        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            // Weights and bias must agree on the quantization scheme.
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(fmt::format(
                    "{}: Per-axis quantization parameters not set on bias tensor, "
                    "despite being set on weight tensor.", descName));
            }

            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
443 
444 } // anonymous namespace
445 
// Validates that this descriptor carries the expected number of non-null
// input and output tensor handles (delegates to ValidateTensors for each side).
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
452 
453 //---------------------------------------------------------------
454 void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
455 {
456  const std::string descriptorName{"MapQueueDescriptor"};
457 
458  ValidateNumInputs(workloadInfo, descriptorName, 1);
459  ValidateNumOutputs(workloadInfo, descriptorName, 0);
460 
461  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
462  {
463  if (!m_Inputs[i])
464  {
466  fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
467  }
468  }
469 }
470 
471 //---------------------------------------------------------------
472 void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
473 {
474  const std::string descriptorName{"UnmapQueueDescriptor"};
475 
476  ValidateNumInputs(workloadInfo, descriptorName, 1);
477  ValidateNumOutputs(workloadInfo, descriptorName, 0);
478 
479  for (unsigned int i = 0; i < m_Inputs.size(); ++i)
480  {
481  if (!m_Inputs[i])
482  {
484  fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
485  }
486  }
487 }
488 
489 //---------------------------------------------------------------
// MemCopy workload: exactly one input/output TensorInfo pair whose element
// counts and data types match (shapes are not compared), with pairwise
// non-null input/output handles.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MemCopyQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName , 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Only the total element count and the data type are required to agree;
    // source and destination shapes may differ.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "{0}: Number of inputs ({1}) does not match the number of outputs ({2}).",
            descriptorName, m_Inputs.size(), m_Outputs.size()));
    }

    // Each input handle is copied to the output handle at the same index, so
    // both must be non-null for every slot.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format(
                "{0}: Invalid NULL input {1}.", descriptorName, i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("{0}: Invalid NULL output {1}", descriptorName, i));
        }
    }
}
524 
525 //---------------------------------------------------------------
// MemImport workload: exactly one input TensorInfo and handle, output counts
// matching the inputs, matching element counts per input/output pair, and
// non-null handles throughout.
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);

    // NOTE(review): this re-checks what ValidateNumInputs above already
    // enforced, but with a differently-worded exception message.
    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of input infos ({}) is not 1.",
                                                   workloadInfo.m_InputTensorInfos.size()));

    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of input infos ({0}) does not match the number of output infos ({1})",
            workloadInfo.m_InputTensorInfos.size(), workloadInfo.m_OutputTensorInfos.size()));
    }

    // Each imported input must have the same element count as the output it
    // backs; shapes are not compared.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(fmt::format(
                "Number of elements for tensor input and output {} does not match", i ));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(fmt::format(
            "Number of inputs ({0}) does not match the number of outputs ({1})",
            m_Inputs.size(), m_Outputs.size()));
    }

    // All handles must be non-null, pairwise.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null input {}", i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(fmt::format("Invalid null output {}", i));
        }
    }
}
580 
581 //---------------------------------------------------------------
// MemSync workload: exactly one non-null input handle and no output handles.
void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
    // NOTE(review): the workloadInfo must carry one output TensorInfo here,
    // even though the descriptor itself must have zero output handles (checked
    // below) — confirm this asymmetry is intentional.
    ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(fmt::format("Number of inputs ({}) is not 1.", m_Inputs.size()));
    }

    if (m_Outputs.size() != 0)
    {
        throw InvalidArgumentException(fmt::format("Number of outputs ({}) is not 0.", m_Outputs.size()));
    }

    if (!m_Inputs[0])
    {
        throw InvalidArgumentException(fmt::format("Invalid null input 0"));
    }
}
602 
603 //---------------------------------------------------------------
604 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
605 {
606  const std::string descriptorName{"ActivationQueueDescriptor"};
607 
608  ValidateNumInputs(workloadInfo, descriptorName, 1);
609  ValidateNumOutputs(workloadInfo, descriptorName, 1);
610 
611  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
612  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
613 
614  std::vector<DataType> supportedTypes =
615  {
622  };
623 
624  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
625  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
626  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
627 }
628 
629 void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
630 {
631  const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
632 
633  ValidateNumInputs(workloadInfo, descriptorName, 1);
634  ValidateNumOutputs(workloadInfo, descriptorName, 1);
635 
636  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
637  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
638 
639  if (outputTensorInfo.GetDataType() != DataType::Signed32 &&
640  outputTensorInfo.GetDataType() != DataType::Signed64)
641  {
642  throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32 or Int64.");
643  }
644 
645  std::vector<DataType> supportedInputTypes =
646  {
655  };
656 
657  ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
658 
659  auto inputShape = inputTensorInfo.GetShape();
660  auto outputShape = outputTensorInfo.GetShape();
661 
662  auto inputNumDimensions = inputShape.GetNumDimensions();
663  auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);
664 
665  const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};
666 
667  // 1D input shape results in scalar output shape
668  if (inputShape.GetNumDimensions() == 1)
669  {
670  if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
671  {
672  throw InvalidArgumentException(descriptorName + outputShapeError);
673  }
674  }
675  else
676  {
677  for (unsigned int i = 0; i < unsignedAxis; ++i)
678  {
679  if (outputShape[i] != inputShape[i])
680  {
681  throw InvalidArgumentException(descriptorName + outputShapeError);
682  }
683  }
684 
685  for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
686  {
687  if (outputShape[i - 1] != inputShape[i])
688  {
689  throw InvalidArgumentException(descriptorName + outputShapeError);
690  }
691  }
692  }
693 }
694 
695 void CastQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
696 {
697  const std::string descriptorName{"CastQueueDescriptor"};
698 
699  ValidateNumInputs(workloadInfo, descriptorName, 1);
700  ValidateNumOutputs(workloadInfo, descriptorName, 1);
701 
702  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
703  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
704 
705  std::vector<DataType> supportedTypes =
706  {
716  };
717 
718  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
719  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
720 }
721 
722 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
723 {
724  const std::string descriptorName{"SoftmaxQueueDescriptor"};
725 
726  ValidateNumInputs(workloadInfo, descriptorName, 1);
727  ValidateNumOutputs(workloadInfo, descriptorName, 1);
728 
729  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
730  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
731 
732  std::vector<DataType> supportedTypes =
733  {
740  };
741 
742  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
743  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
744  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
745 }
746 
747 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
748 {
749  const std::string descriptorName{"SplitterQueueDescriptor"};
750 
751  ValidateNumInputs(workloadInfo, descriptorName, 1);
752 
753  // Check the supported data types
754  std::vector<DataType> supportedTypes =
755  {
764  };
765 
766  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
767  for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
768  {
769  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
770  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
771 
772  const std::string outputName = "output_" + std::to_string(i);
773  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
774  }
775 
776  if (workloadInfo.m_OutputTensorInfos.size() <= 0)
777  {
778  throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
779  }
780 
781  if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
782  {
784  descriptorName + ": Number of split windows "
785  "has to match number of workloadInfo.m_OutputTensorInfos. "
786  "Number of windows: " +
787  to_string(m_ViewOrigins.size()) +
788  ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
789  }
790 
791  //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
792  std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
793  for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
794  {
795  //Checks that the dimensionality of input is same as the split windows.
796  ViewOrigin const& e = m_ViewOrigins[w];
797  if (e.m_Origin.size() != inputDims)
798  {
799  throw InvalidArgumentException(descriptorName + ": Window origin have to "
800  "have the same dimensionality as the input tensor. "
801  "Window origin (index: " +
802  to_string(w) + ") has " + to_string(e.m_Origin.size()) +
803  " dimensions, the input "
804  "tensor has " +
805  to_string(inputDims) + " dimensions.");
806  }
807  for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
808  {
809  if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
810  workloadInfo.m_InputTensorInfos[0].GetShape()[i])
811  {
812  throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
813  "be smaller or equal than the size of the input in that coord.");
814  }
815  }
816  }
817 }
818 
819 void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
820 {
821  const std::string descriptorName{"ConcatQueueDescriptor"};
822 
823  ValidateNumOutputs(workloadInfo, descriptorName, 1);
824 
825  if (m_Inputs.size() <= 0)
826  {
827  throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
828  }
829  if (m_Outputs.size() <= 0)
830  {
831  throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
832  }
833 
834  if (workloadInfo.m_InputTensorInfos.size() <= 0)
835  {
836  throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
837  }
838  if (workloadInfo.m_OutputTensorInfos.size() <= 0)
839  {
840  throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
841  }
842 
843  if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
844  {
845  throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
846  }
847 
848  if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
849  {
850  return;
851  }
852 
853  if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
854  {
856  descriptorName + ": Number of split windows "
857  "has to match number of workloadInfo.m_InputTensorInfos. "
858  "Number of windows: " +
859  to_string(m_ViewOrigins.size()) +
860  ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
861  }
862 
863  //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
864  std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
865  for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
866  {
867  //Checks that the dimensionality of output is same as the split windows.
868  ViewOrigin const& e = m_ViewOrigins[w];
869  if (e.m_Origin.size() != outputDims)
870  {
871  throw InvalidArgumentException(descriptorName + ": Window origin have to "
872  "have the same dimensionality as the output tensor. "
873  "Window origin (index: " +
874  to_string(w) + ") has " + to_string(e.m_Origin.size()) +
875  " dimensions, the output "
876  "tensor has " +
877  to_string(outputDims) + " dimensions.");
878  }
879  //Checks that the merge windows are within the output tensor.
880  for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
881  {
882  if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
883  > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
884  {
885  throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
886  "be smaller or equal than the size of the output in that coord.");
887  }
888  }
889  }
890 
891  // Check the supported data types
892  std::vector<DataType> supportedTypes =
893  {
902  };
903 
904  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
905  for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
906  {
907  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
908  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
909 
910  const std::string inputName = "input_" + std::to_string(i);
911  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
912  }
913 }
914 
// Validates a Stack workload: N inputs sharing m_InputShape (rank <= 4) are stacked
// along m_Axis into one output whose shape is the input shape with an extra
// dimension of size N inserted at m_Axis. All tensors must share one data type.
915 void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
916 {
917  const std::string descriptorName{"StackQueueDescriptor"};
918 
919  ValidateNumOutputs(workloadInfo, descriptorName, 1);
920 
// The descriptor, not the workload, fixes the expected input count.
921  if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
922  {
923  throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
924  }
925 
926  // All inputs must have the same shape, which is defined in parameters
927  const TensorShape& inputShape = m_Parameters.m_InputShape;
928  for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
929  {
930  if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
931  {
932  throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
933  }
934  }
935 
936  if (inputShape.GetNumDimensions() > 4)
937  {
938  throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
939  }
940 
941  // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
942  // since the output tensor has an additional dimension.
943  if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
944  {
945  throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
946  "than the number of input dimensions.");
947  }
948 
949  // Output shape must be as inferred from the input shape
// Dimensions before the axis are unchanged ...
950  const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
951  for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
952  {
953  if (outputShape[i] != inputShape[i])
954  {
955  throw InvalidArgumentException(descriptorName + ": Output tensor must "
956  "match shape inferred from input tensor.");
957  }
958  }
959 
// ... the axis dimension holds the number of stacked inputs ...
960  if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
961  {
962  throw InvalidArgumentException(descriptorName + ": Output tensor must "
963  "match shape inferred from input tensor.");
964  }
965 
// ... and dimensions after the axis are the input dimensions shifted by one.
966  for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
967  {
968  if (outputShape[i] != inputShape[i-1])
969  {
970  throw InvalidArgumentException(descriptorName + ": Output tensor must "
971  "match shape inferred from input tensor.");
972  }
973  }
974 
975  if (outputShape.GetNumDimensions() > 5)
976  {
977  throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
978  }
979 
980  // Check the supported data types
981  std::vector<DataType> supportedTypes =
982  {
991  };
992 
993  ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
994 
// Every further input, and the output, must share input_0's data type.
995  for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
996  {
997  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
998  workloadInfo.m_InputTensorInfos[i],
999  descriptorName,
1000  "input_0",
1001  "input_" + std::to_string(i));
1002  }
1003 
1004  ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1005  workloadInfo.m_OutputTensorInfos[0],
1006  descriptorName,
1007  "input_0",
1008  "output");
1009 }
1010 
// Validates a Fill workload: one 1-D input (the target shape) and one output
// whose data type must be in the supported set.
1011 void FillQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1012 {
1013  const std::string descriptorName{"FillQueueDescriptor"};
1014 
1015  ValidateNumInputs(workloadInfo, descriptorName, 1);
1016  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1017 
1018  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1019  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1020 
// The input is a 1-D shape tensor; only its rank is constrained here.
1021  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1, "input");
1022 
1023  std::vector<DataType> supportedTypes =
1024  {
1029  };
1030 
// Only the output's type is checked; the input type is unconstrained here.
1031  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1032 }
1033 
// FullyConnectedQueueDescriptor::Validate — checks input (rank 2 or 4),
// 2-D weights, optional 1-D bias (type/quantization derived from input+weights),
// and a rank-2 output. BFloat16 input may pair with a Float32 output.
1035 {
1036  const std::string descriptorName{"FullyConnectedQueueDescriptor"};
1037 
// Weights are input 1; bias, when enabled, is input 2.
1038  uint32_t numInputs = 2;
1039  if (m_Parameters.m_BiasEnabled)
1040  {
1041  numInputs = 3;
1042  }
1043 
1044  ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1045  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1046 
1047  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1048  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1049 
1050  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1051 
1052  if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
1053  {
1054  throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
1055  }
1056 
1057  TensorInfo weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
1058  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
1059 
1060  if (m_Parameters.m_BiasEnabled)
1061  {
1062  TensorInfo biasTensorInfo = workloadInfo.m_InputTensorInfos[2];
1063  // Validates type and quantization values.
1064  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1065  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1066  ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1067  }
1068 
1069  // Check the supported data types
1070  std::vector<DataType> supportedTypes =
1071  {
1078  };
1079 
1080  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1081 
1082  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1083  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1084  {
1085  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1086  {
1087  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1088  "for BFloat16 input.");
1089  }
1090  }
1091  else
1092  {
1093  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1094  }
1095 }
1096 
// NormalizationQueueDescriptor::Validate — single input/output pair that must
// agree in data type and shape, with the type drawn from the supported set.
1098 {
1099  const std::string descriptorName{"NormalizationQueueDescriptor"};
1100 
1101  ValidateNumInputs(workloadInfo, descriptorName, 1);
1102  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1103 
1104  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1105  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1106 
1107  // Check the supported data types
1108  std::vector<DataType> supportedTypes =
1109  {
1116  };
1117 
1118  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1119 
1120  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1121 
1122  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1123 }
1124 
// Validates an elementwise Addition workload: two inputs and one output of a
// common supported data type, with input shapes broadcastable to the output.
1125 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1126 {
1127  const std::string descriptorName{"AdditionQueueDescriptor"};
1128 
1129  ValidateNumInputs(workloadInfo, descriptorName, 2);
1130  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1131 
1132  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1133  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1134  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1135 
1136  std::vector<DataType> supportedTypes =
1137  {
1145  };
1146 
1147  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1148  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1149  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1150 
// Chained pairwise checks imply all three tensors share one data type.
1151  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1152  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1153 
1154  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1155  inputTensorInfo1,
1156  outputTensorInfo,
1157  descriptorName,
1158  "input_0",
1159  "input_1");
1160 }
1161 
// MultiplicationQueueDescriptor::Validate — same contract as Addition:
// two inputs and one output of a common supported type, broadcastable shapes.
1163 {
1164  const std::string descriptorName{"MultiplicationQueueDescriptor"};
1165 
1166  ValidateNumInputs(workloadInfo, descriptorName, 2);
1167  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1168 
1169  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1170  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1171  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1172 
1173  std::vector<DataType> supportedTypes =
1174  {
1182  };
1183 
1184  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1185  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1186  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1187 
// Chained pairwise checks imply all three tensors share one data type.
1188  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
1189  ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
1190 
1191  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1192  inputTensorInfo1,
1193  outputTensorInfo,
1194  descriptorName,
1195  "input_0",
1196  "input_1");
1197 }
1198 
// BatchNormalizationQueueDescriptor::Validate — input/output must match in
// type and shape; the four per-channel statistic tensors (mean, variance,
// beta, gamma) must be non-null, 1-D, and all the same shape.
1200 {
1201  const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
1202 
1203  ValidateNumInputs(workloadInfo, descriptorName, 1);
1204  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1205 
1206  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1207  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1208 
1209  std::vector<DataType> supportedTypes =
1210  {
1217  };
1218 
1219  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1220  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1221 
1222  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1223  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1224 
// The statistics are carried as handles on the descriptor; reject nulls before use.
1225  ValidatePointer(m_Mean, descriptorName, "mean");
1226  ValidatePointer(m_Variance, descriptorName, "variance");
1227  ValidatePointer(m_Beta, descriptorName, "beta");
1228  ValidatePointer(m_Gamma, descriptorName, "gamma");
1229 
1230  const TensorInfo& mean = m_Mean->GetTensorInfo();
1231  const TensorInfo& variance = m_Variance->GetTensorInfo();
1232  const TensorInfo& beta = m_Beta->GetTensorInfo();
1233  const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1234 
1235  ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
1236  ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
1237  ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
1238  ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
1239 
// Using mean as the reference makes all four shapes mutually equal.
1240  ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
1241  ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
1242  ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
1243 }
1244 
// Convolution2dQueueDescriptor::Validate — 4-D input/output, 4-D weights held
// on the descriptor, optional bias (type/quantization derived from
// input+weights), strictly positive strides, and per-axis quantization checks.
// BFloat16 input may pair with a Float32 output.
1246 {
1247  const std::string descriptorName{"Convolution2dQueueDescriptor"};
1248 
1249  ValidateNumInputs(workloadInfo, descriptorName, 1);
1250  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1251 
1252  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1253  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1254 
1255  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1256  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1257 
// Weights come from the descriptor handle, not from the workload inputs.
1258  ValidatePointer(m_Weight, descriptorName, "weight");
1259 
1260  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1261  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1262 
1263  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1264 
1265  Optional<TensorInfo> optionalBiasTensorInfo;
1266  if (m_Parameters.m_BiasEnabled)
1267  {
1268  ValidatePointer(m_Bias, descriptorName, "bias");
1269 
1270  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1271  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1272 
1273  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1274  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1275  }
1276 
// Zero or negative strides would make the convolution ill-defined.
1277  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1278  {
1280  fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1281  "cannot be either negative or 0.",
1282  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1283  }
1284 
1285  ValidatePerAxisQuantization(inputTensorInfo,
1286  outputTensorInfo,
1287  weightTensorInfo,
1288  optionalBiasTensorInfo,
1289  descriptorName);
1290 
1291  std::vector<DataType> supportedTypes =
1292  {
1300  };
1301 
1302  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1303 
1304  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1305  if (inputTensorInfo.GetDataType() == DataType::BFloat16)
1306  {
1307  if (outputTensorInfo.GetDataType() != DataType::BFloat16 && outputTensorInfo.GetDataType() != DataType::Float32)
1308  {
1309  throw InvalidArgumentException(descriptorName + ": " + " Output tensor type must be BFloat16 or Float32 "
1310  "for BFloat16 input.");
1311  }
1312  }
1313  else
1314  {
1315  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1316  }
1317 }
1318 
// Convolution3dQueueDescriptor::Validate — unlike Convolution2d, weights (and
// optional bias) arrive as workload inputs 1 and 2 rather than descriptor
// handles. Input/output/weights are 5-D; all three strides must be positive.
1320 {
1321  const std::string descriptorName{"Convolution3dQueueDescriptor"};
1322 
// Input 0 is data, input 1 is weights, input 2 (when bias is enabled) is bias.
1323  uint32_t numInputs = 2;
1324  if (m_Parameters.m_BiasEnabled)
1325  {
1326  numInputs = 3;
1327  }
1328  ValidateNumInputs(workloadInfo, descriptorName, numInputs);
1329  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1330 
1331  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1332  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1333 
1334  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 5, "input");
1335  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 5, "output");
1336 
1337  const TensorInfo& weightTensorInfo = workloadInfo.m_InputTensorInfos[1];
1338  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 5, "weight");
1339 
1340  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1341 
1342  Optional<TensorInfo> optionalBiasTensorInfo;
1343  if (m_Parameters.m_BiasEnabled)
1344  {
1345  optionalBiasTensorInfo = MakeOptional<TensorInfo>(workloadInfo.m_InputTensorInfos[2]);
1346  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1347 
1348  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1349  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1350  }
1351 
1352  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 || m_Parameters.m_StrideZ <= 0 )
1353  {
1355  fmt::format("{}: strideX (provided {}), strideY (provided {}) or strideZ (provided {})"
1356  "cannot be either negative or 0.",
1357  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY, m_Parameters.m_StrideZ))
;
1358  }
1359 
1360  ValidatePerAxisQuantization(inputTensorInfo,
1361  outputTensorInfo,
1362  weightTensorInfo,
1363  optionalBiasTensorInfo,
1364  descriptorName);
1365 
1366  std::vector<DataType> supportedTypes =
1367  {
1375  };
1376 
1377  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1378  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1379 }
1380 
// DepthwiseConvolution2dQueueDescriptor::Validate — 4-D input/output, weights
// held on the descriptor in the ArmNN depthwise layout [1, H, W, Cout]
// (independent of m_DataLayout), optional bias, positive strides and
// dilations >= 1, plus per-axis quantization checks.
1382 {
1383  const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1384 
1385  ValidateNumInputs(workloadInfo, descriptorName, 1);
1386  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1387 
1388  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1389  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1390 
1391  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1392  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1393 
1394  ValidatePointer(m_Weight, descriptorName, "weight");
1395 
1396  const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1397  ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1398 
1399  if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1400  {
// NOTE(review): m_DilationX is passed for BOTH format placeholders below, so the
// reported dilationY value is wrong (should presumably be m_Parameters.m_DilationY).
// Fix separately; the thrown-on condition itself is correct.
1402  fmt::format("{}: dilationX (provided {}) and dilationY (provided {}) "
1403  "cannot be smaller than 1.",
1404  descriptorName, m_Parameters.m_DilationX, m_Parameters.m_DilationX));
1405  }
1406 
1407  if (m_Parameters.m_StrideX <= 0 || m_Parameters.m_StrideY <= 0 )
1408  {
1410  fmt::format("{}: strideX (provided {}) and strideY (provided {}) "
1411  "cannot be either negative or 0.",
1412  descriptorName, m_Parameters.m_StrideX, m_Parameters.m_StrideY));
1413  }
1414 
1415  const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1416 
1417  // Expected weight shape: [ 1, H, W, I*M ] - This shape does NOT depend on the data layout
1418  // inputChannels * channelMultiplier should be equal to outputChannels.
1419  const unsigned int numWeightOutputChannels = weightTensorInfo.GetShape()[3]; // I*M=Cout
1420  const unsigned int numOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1421  if (numWeightOutputChannels != numOutputChannels)
1422  {
1423  throw InvalidArgumentException(fmt::format(
1424  "{0}: The weight format in armnn is expected to be [1, H, W, Cout]."
1425  "But 4th dimension is not equal to Cout. Cout = {1} Provided weight shape: [{2}, {3}, {4}, {5}]",
1426  descriptorName,
1427  numOutputChannels,
1428  weightTensorInfo.GetShape()[0],
1429  weightTensorInfo.GetShape()[1],
1430  weightTensorInfo.GetShape()[2],
1431  weightTensorInfo.GetShape()[3]));
1432  }
1433  if (weightTensorInfo.GetShape()[0] != 1)
1434  {
1435  throw InvalidArgumentException(fmt::format(
1436  "{0}: The weight format in armnn is expected to be [1, H, W, Cout]."
1437  "But first dimension is not equal to 1. Provided weight shape: [{1}, {2}, {3}, {4}]",
1438  descriptorName,
1439  weightTensorInfo.GetShape()[0],
1440  weightTensorInfo.GetShape()[1],
1441  weightTensorInfo.GetShape()[2],
1442  weightTensorInfo.GetShape()[3]));
1443  }
1444 
1445  ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1446 
1447  Optional<TensorInfo> optionalBiasTensorInfo;
1448  if (m_Parameters.m_BiasEnabled)
1449  {
1450  ValidatePointer(m_Bias, descriptorName, "bias");
1451 
1452  optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1453  const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();
1454 
1455  ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1456  ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1457  }
1458  ValidatePerAxisQuantization(inputTensorInfo,
1459  outputTensorInfo,
1460  weightTensorInfo,
1461  optionalBiasTensorInfo,
1462  descriptorName);
1463 
1464  std::vector<DataType> supportedTypes =
1465  {
1472  };
1473 
1474  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1475  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1476 }
1477 
1478 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1479 {
1480  const std::string descriptorName{"PermuteQueueDescriptor"};
1481 
1482  ValidateNumInputs(workloadInfo, descriptorName, 1);
1483  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1484 
1485  const PermutationVector& mapping = m_Parameters.m_DimMappings;
1486 
1487  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1488  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1489 
1490  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1491  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1492 
1493  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1494  {
1495  if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1496  {
1497  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1498  " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1499  "must match dst dimension " + to_string(mapping[i]) +
1500  " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1501  }
1502  }
1503 
1504  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1505 }
1506 
// Validates a Pooling2d workload: 4-D input and output of matching,
// supported data types. Shapes are not cross-checked here.
1507 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1508 {
1509  const std::string descriptorName{"Pooling2dQueueDescriptor"};
1510 
1511  ValidateNumInputs(workloadInfo, descriptorName, 1);
1512  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1513 
1514  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1515  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1516 
1517  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1518  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1519 
1520  std::vector<DataType> supportedTypes =
1521  {
1528  };
1529 
1530  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1531  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1532 }
1533 
// Validates a ResizeBilinear workload: 4-D input/output of matching supported
// types; since resize only alters width and height, the batch and channel
// extents must be identical between input and output.
1534 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1535 {
1536  const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1537 
1538  ValidateNumInputs(workloadInfo, descriptorName, 1);
1539  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1540 
1541  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1542  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1543 
1544  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1545  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1546 
1547  std::vector<DataType> supportedTypes =
1548  {
1555  };
1556 
1557  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1558  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1559 
1560  // ResizeBilinear only changes width and height: batch and channel count must match.
1561  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1562  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1563  if (inputBatchSize != outputBatchSize)
1564  {
1566  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1567  descriptorName, inputBatchSize, outputBatchSize));
1568  }
1569 
// The channel axis position depends on the layout (NCHW vs NHWC); resolve it first.
1570  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1571  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1572  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1573  if (inputChannelCount != outputChannelCount)
1574  {
1576  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1577  descriptorName, inputChannelCount, outputChannelCount));
1578  }
1579 }
1580 
// Validates a Resize workload: identical contract to ResizeBilinear — 4-D
// input/output of matching supported types, with batch and channel extents
// unchanged (only width/height may differ).
1581 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1582 {
1583  const std::string descriptorName{"ResizeQueueDescriptor"};
1584 
1585  ValidateNumInputs(workloadInfo, descriptorName, 1);
1586  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1587 
1588  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1589  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1590 
1591  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1592  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1593 
1594  std::vector<DataType> supportedTypes =
1595  {
1602  };
1603 
1604  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1605  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1606 
1607  // Resize only changes width and height: batch and channel count must match.
1608  const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1609  const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1610  if (inputBatchSize != outputBatchSize)
1611  {
1613  fmt::format("{}: Input batch size ({}) does not match output batch size ({})",
1614  descriptorName, inputBatchSize, outputBatchSize));
1615  }
1616 
// The channel axis position depends on the layout (NCHW vs NHWC); resolve it first.
1617  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1618  const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1619  const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1620  if (inputChannelCount != outputChannelCount)
1621  {
1623  fmt::format("{}: Input channel count ({}) does not match output channel count ({})",
1624  descriptorName, inputChannelCount, outputChannelCount));
1625  }
1626 }
1627 
// FakeQuantizationQueueDescriptor::Validate — one 2-D input and one 2-D output
// of identical shape, with the quantization range satisfying min <= max.
1629 {
1630  const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1631 
1632  ValidateNumInputs(workloadInfo, descriptorName, 1);
1633  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1634 
1635  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1636  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1637 
1638  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1639  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1640 
1641  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1642 
1643  if (m_Parameters.m_Min > m_Parameters.m_Max)
1644  {
1645  throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1646  }
1647 }
1648 
// InstanceNormalizationQueueDescriptor::Validate — input of rank <= 4, output
// matching the input in both shape and (supported) data type.
1650 {
1651  const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};
1652 
1653  ValidateNumInputs(workloadInfo, descriptorName, 1);
1654  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1655 
1656  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1657  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1658 
1659  if (inputTensorInfo.GetNumDimensions() > 4)
1660  {
1661  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1662  }
1663 
1664  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1665 
1666  // Check the supported data types
1667  std::vector<DataType> supportedTypes =
1668  {
1672  };
1673 
1674  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1675  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1676 }
1677 
// L2NormalizationQueueDescriptor::Validate — input of rank <= 4, output
// matching the input in both shape and (supported) data type.
1679 {
1680  const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1681 
1682  ValidateNumInputs(workloadInfo, descriptorName, 1);
1683  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1684 
1685  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1686  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1687 
1688  if (inputTensorInfo.GetNumDimensions() > 4)
1689  {
1690  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1691  }
1692 
1693  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1694 
1695  // Check the supported data types
1696  std::vector<DataType> supportedTypes =
1697  {
1704  };
1705 
1706  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1707  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1708 }
1709 
// Validates a LogSoftmax workload: input and output must match in shape and
// in (supported) data type. No rank restriction beyond the shape match.
1710 void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1711 {
1712  const std::string descriptorName{"LogSoftmaxQueueDescriptor"};
1713 
1714  ValidateNumInputs(workloadInfo, descriptorName, 1);
1715  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1716 
1717  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1718  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1719 
1720  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1721 
1722  std::vector<DataType> supportedTypes =
1723  {
1727  };
1728 
1729  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1730  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1731 }
1732 
// Validates a Constant workload: no runtime inputs; the output must match the
// shape of the constant payload held in m_LayerOutput and have a supported type.
1733 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1734 {
1735  const std::string descriptorName{"ConstantQueueDescriptor"};
1736 
1737  ValidateNumInputs(workloadInfo, descriptorName, 0);
1738  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1739 
// The constant data lives on the descriptor; a null handle is a setup error.
1740  if (!m_LayerOutput)
1741  {
1742  throw InvalidArgumentException(descriptorName + ": No const input specified.");
1743  }
1744 
1745  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1746  ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1747 
1748  // Check the supported data types
1749  std::vector<DataType> supportedTypes =
1750  {
1759  };
1760 
1761  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1762 }
1763 
// Validates a Reshape workload: input and output must hold the same number of
// elements (shapes may differ freely) and share a supported data type.
1764 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1765 {
1766  const std::string descriptorName{"ReshapeQueueDescriptor"};
1767 
1768  ValidateNumInputs(workloadInfo, descriptorName, 1);
1769  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1770 
1771  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1772  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1773 
// Element count, not shape, is the invariant a reshape must preserve.
1774  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1775 
1776  // Check the supported data types
1777  std::vector<DataType> supportedTypes =
1778  {
1787  };
1788 
1789  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1790  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1791 }
1792 
// SpaceToBatchNdQueueDescriptor::Validate — 4-D input/output; exactly two
// spatial block-shape entries with a matching pad list; the padded input must
// hold the same number of elements as the output, and each padded spatial
// extent must be divisible by the corresponding block-shape entry.
1794 {
1795  const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1796 
1797  ValidateNumInputs(workloadInfo, descriptorName, 1);
1798  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1799 
1800  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1801  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1802 
1803  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1804  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1805 
1806  if (m_Parameters.m_BlockShape.size() != 2)
1807  {
1808  throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1809  }
1810 
1811  if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1812  {
1813  throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1814  "dimensions as Block Shape.");
1815  }
1816 
1817  const TensorShape& inputShape = inputTensorInfo.GetShape();
1818 
// Pad list entries are (before, after) pairs: index 0 is height, index 1 width.
1819  std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1820  std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1821 
1822  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1823 
1824  const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1825  widthPad.first + widthPad.second;
1826  const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1827  heightPad.first + heightPad.second;
1828 
// SpaceToBatchNd only rearranges data, so padded-input and output element counts must agree.
1829  const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1830  inputShape[dimensionIndices.GetChannelsIndex()];
1831  const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1832 
1833  if (numOutputElements != numInputElements)
1834  {
1835  throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1836  to_string(numInputElements) + " after padding but output tensor has " +
1837  to_string(numOutputElements) + " elements.");
1838  }
1839 
1840  if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1841  {
1842  throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1843  "divisible by Block Shape in all spatial dimensions");
1844  }
1845 
1846  std::vector<DataType> supportedTypes =
1847  {
1854  };
1855 
1856  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1857  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1858 }
1859 
1861 {
1862  const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1863 
1864  ValidateNumInputs(workloadInfo, descriptorName, 1);
1865  ValidateNumOutputs(workloadInfo, descriptorName, 1);
1866 
1867  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1868  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1869 
1870  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1871  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1872 
1873  std::vector<DataType> supportedTypes =
1874  {
1881  };
1882 
1883  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1884  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1885 
1886  ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1887 
1888  if (m_Parameters.m_BlockSize == 0)
1889  {
1890  throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
1891  }
1892 
1893  DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1894  const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1895  const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1896  const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1897 
1898  const TensorShape& inputShape = inputTensorInfo.GetShape();
1899  if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1900  {
1901  throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1902  "by block size in all spatial dimensions");
1903  }
1904 
1905  const TensorShape& outputShape = outputTensorInfo.GetShape();
1906  if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1907  {
1908  throw InvalidArgumentException(descriptorName + ": The depth of the output tensor"
1909  "must be divisible by the square of block size." );
1910  }
1911 }
1912 
// Validates a Floor workload: one input and one output that agree on data type,
// shape, and quantization parameters (Floor is element-wise and shape-preserving).
void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FloorQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    // Quantization scale/offset must also carry over unchanged input -> output.
    ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
1936 
// Validates an LSTM workload: 3 inputs (input, outputStateIn, cellStateIn) and
// 4 outputs (scratchBuffer, outputStateOut, cellStateOut, output), consistent
// data types across all tensors, non-negative clipping thresholds, and weight/
// bias tensors whose presence and dimensions are consistent with the CIFG,
// peephole, projection and layer-normalisation flags in m_Parameters.
void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()

    const std::string descriptorName{"LstmQueueDescriptor"};

    // check dimensions of all inputs and outputs
    if (workloadInfo.m_InputTensorInfos.size() != 3)
    {
        throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
    }

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    // check for supported type of one input and match them with all the other input and output
    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // type matches all other inputs
    for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }
    // type matches all other outputs
    for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        // NOTE(review): hard-coded "LstmQueueDescriptor" literal here - the
        // sibling loop above uses descriptorName; consider unifying.
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_OutputTensorInfos[i],
                                     "LstmQueueDescriptor",
                                     "input_0",
                                     "output_" + std::to_string(i));
    }

    // Making sure clipping parameters have valid values.
    // == 0 means no clipping
    //  > 0 means clipping
    if (m_Parameters.m_ClippingThresCell < 0.0f)
    {
        throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
    }
    if (m_Parameters.m_ClippingThresProj < 0.0f)
    {
        throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
    }

    // Inferring batch size, number of outputs and number of cells from the inputs.
    const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
    const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
    ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
    // n_cell comes from the rows of InputToOutputWeights [n_cell, n_input].
    const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
    ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
    // n_output comes from the columns of RecurrentToOutputWeights [n_cell, n_output].
    const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];

    // input tensor
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
                                descriptorName + " input_0");
    // outputStateInTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
                                descriptorName + " input_1");
    // outputStateInTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
                                descriptorName + " input_2");
    // scratchBufferTensor
    // With CIFG the input gate is fused away, so only 3 gate buffers are needed.
    unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
                                descriptorName + " output_0");
    // outputStateOutTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
                                descriptorName + " output_1");
    // cellStateOutTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
                                descriptorName + " output_2");
    // outputTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
                                descriptorName + " output_3");

    // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
    if ( m_InputToInputWeights )
    {
        // NOTE(review): the label below says "InputLayerNormWeights" but the
        // tensor being checked is m_InputToInputWeights - likely a copy/paste
        // slip in the error label; verify against upstream.
        ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
                                    (n_cell * n_input), "InputLayerNormWeights");
    }

    ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
    ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
                                (n_cell * n_input), "InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
    ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
                                (n_cell * n_input), "InputToCellWeights");

    if ( m_RecurrentToInputWeights )
    {
        ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
                                    (n_cell * n_output), "RecurrentToInputWeights");
    }

    ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
    ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
                                (n_cell * n_output), "RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
    ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
                                (n_cell * n_output), "RecurrentToCellWeights");

    // Make sure the input-gate's parameters are either both present (regular
    // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
    bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
                                     !m_Parameters.m_CifgEnabled) ||
                                     (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
                                     m_Parameters.m_CifgEnabled));
    if (!cifg_weights_all_or_none)
    {
        throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
                                       "RecurrentToInputWeights must either both be present (regular LSTM) "
                                       "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
                                       "accordingly.");
    }

    // Peephole weights are 1-D vectors of length n_cell when present.
    if ( m_CellToInputWeights )
    {
        ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToInputWeights");
    }
    if ( m_CellToForgetWeights )
    {
        ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToForgetWeights");
    }
    if ( m_CellToOutputWeights )
    {
        ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToOutputWeights");
    }

    // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
    // (CellToInputWeights is exempt when CIFG is enabled, since the input gate is fused away.)
    bool peephole_weights_all_or_none =
            (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) &&  m_CellToForgetWeights
            && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
            || ( !m_CellToInputWeights && !m_CellToForgetWeights
            && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
    if (!peephole_weights_all_or_none)
    {
        throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
    }

    // Make sure the input gate bias is present only when not a CIFG-LSTM.
    if (m_Parameters.m_CifgEnabled)
    {
        if (m_InputGateBias)
        {
            throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
        }
    }
    else
    {
        if (!m_InputGateBias)
        {
            throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
                                           "must be present.");
        }
        ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
                                    n_cell, "InputGateBias");
    }

    // The three remaining gate biases are always mandatory, each [n_cell].
    ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
    ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");

    ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
    ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");

    ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
    ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");

    if (m_ProjectionWeights)
    {
        ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
                                    (n_cell * n_output), "ProjectionWeights");
    }
    if (m_ProjectionBias)
    {
        ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
    }

    // Making sure the projection tensors are consistent:
    // 1) If projection weight is not present, then projection bias should not be
    // present.
    // 2) If projection weight is present, then projection bias is optional.
    bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
                                        !m_Parameters.m_ProjectionEnabled)
                                        || (m_ProjectionWeights && !m_ProjectionBias &&
                                        m_Parameters.m_ProjectionEnabled)
                                        || (m_ProjectionWeights && m_ProjectionBias &&
                                        m_Parameters.m_ProjectionEnabled));
    if (!projecton_tensors_consistent)
    {
        throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
    }

    // The four layer normalization weights either all have values or none of them have values. Additionally, if
    // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
    // either all have values or none of them have values. Layer normalization is used when the values of all the
    // layer normalization weights are present
    if (m_InputLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
    }
    if (m_ForgetLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
    }
    if (m_CellLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
    }
    if (m_OutputLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
    }

    if (m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Parameters.m_CifgEnabled)
        {
            // Non-CIFG layer-norm LSTM additionally requires input layer-norm weights.
            if (!m_InputLayerNormWeights)
            {
                throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
                                               "disabled but InputLayerNormWeights are not present");
            }
            ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
                                        1, n_cell, "InputLayerNormWeights");
        }
        else if (m_InputLayerNormWeights)
        {
            throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
                                           "enabled");
        }

        // Forget/output/cell layer-norm weights are mandatory whenever layer norm is on.
        ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "ForgetLayerNormWeights");
        ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");

        ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "OutputLayerNormWeights");
        ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");

        ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "CellLayerNormWeights");
        ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
    }
    else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
    {
        throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
                                       "normalisation weights are present.");
    }
}
2206 
2208 {
2209  const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
2210 
2211  ValidateNumInputs(workloadInfo, descriptorName, 1);
2212  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2213 
2214  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2215  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2216 
2217  if (inputTensorInfo.GetDataType() != DataType::BFloat16)
2218  {
2219  throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
2220  }
2221 
2222  if (outputTensorInfo.GetDataType() != DataType::Float32)
2223  {
2224  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2225  }
2226 
2227  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2228 }
2229 
2231 {
2232  const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
2233 
2234  ValidateNumInputs(workloadInfo, descriptorName, 1);
2235  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2236 
2237  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2238  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2239 
2240  if (inputTensorInfo.GetDataType() != DataType::Float32)
2241  {
2242  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2243  }
2244 
2245  if (outputTensorInfo.GetDataType() != DataType::BFloat16)
2246  {
2247  throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
2248  }
2249 
2250  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2251 }
2252 
2254 {
2255  const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
2256 
2257  ValidateNumInputs(workloadInfo, descriptorName, 1);
2258  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2259 
2260  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2261  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2262 
2263  if (inputTensorInfo.GetDataType() != DataType::Float32)
2264  {
2265  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
2266  }
2267 
2268  if (outputTensorInfo.GetDataType() != DataType::Float16)
2269  {
2270  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
2271  }
2272 
2273  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2274 }
2275 
2277 {
2278  const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
2279 
2280  ValidateNumInputs(workloadInfo, descriptorName, 1);
2281  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2282 
2283  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2284  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2285 
2286  if (inputTensorInfo.GetDataType() != DataType::Float16)
2287  {
2288  throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
2289  }
2290 
2291  if (outputTensorInfo.GetDataType() != DataType::Float32)
2292  {
2293  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
2294  }
2295 
2296  ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2297 }
2298 
// Validates an element-wise Division workload: two inputs, one output, all of a
// supported (and mutually checked) data type, with broadcast-compatible shapes.
void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DivisionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs may differ in shape if they broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2332 
2334 {
2335  const std::string descriptorName{"SubtractionQueueDescriptor"};
2336 
2337  ValidateNumInputs(workloadInfo, descriptorName, 2);
2338  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2339 
2340  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2341  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2342  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2343 
2344  std::vector<DataType> supportedTypes =
2345  {
2353  };
2354 
2355  ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2356  ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2357  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2358 
2359  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2360  inputTensorInfo1,
2361  outputTensorInfo,
2362  descriptorName,
2363  "input_0",
2364  "input_1");
2365 }
2366 
// Validates an element-wise Maximum workload: two inputs, one output, all of a
// supported data type, with broadcast-compatible shapes.
void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MaximumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs may differ in shape if they broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2400 
// Validates a Mean (reduction) workload: one input, one output of the same data
// type, and an output rank consistent with the KeepDims/Axis parameters.
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MeanQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    // First check if input tensor data type is supported, then
    // check if this data type matches the output tensor data type
    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_KeepDims)
    {
        // Reduced axes are kept as size-1 dims, so rank is unchanged.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        // Reducing over all axes collapses the output to rank 1.
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
    }
    else
    {
        // Each reduced axis removes one dimension, but never below rank 1.
        unsigned int outputDim =
            inputTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(outputTensorInfo,
                                    descriptorName,
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
2444 
2445 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2446 {
2447  const std::string descriptorName{"PadQueueDescriptor"};
2448 
2449  ValidateNumInputs(workloadInfo, descriptorName, 1);
2450  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2451 
2452  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2453  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2454 
2455  // input and output should have the same number of dimensions
2456  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
2457 
2458  // there should be entry in the pad list for each dimension in the input tensor
2459  if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
2460  throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
2461  "as there are dimensions in the input tensor that is " +
2462  std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
2463  " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
2464  }
2465 }
2466 
// Validates a Quantize workload: one input of a supported type and one output
// whose data type is one of the quantized types.
void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QuantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    // Output type is checked via the IsQuantizedType predicate rather than a list.
    if (!IsQuantizedType(outputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
    }
}
2495 
2497 {
2498  const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
2499 
2500  ValidateNumInputs(workloadInfo, descriptorName, 1);
2501  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2502 
2503  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2504  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2505 
2506  std::vector<DataType> supportedTypes =
2507  {
2514  };
2515 
2516  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2517  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2518 }
2519 
2521 {
2522  const std::string descriptorName{"StridedSliceQueueDescriptor"};
2523 
2524  ValidateNumInputs(workloadInfo, descriptorName, 1);
2525  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2526 
2527  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2528  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2529 
2530  std::vector<DataType> supportedTypes =
2531  {
2538  };
2539 
2540  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2541  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2542 
2543  ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2544 
2545  const uint32_t rank = inputTensorInfo.GetNumDimensions();
2546  if (rank > 4)
2547  {
2548  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
2549  }
2550 
2551  // Begin, End & Stride length must be of rank(input0)
2552  if (m_Parameters.m_Begin.size() != rank)
2553  {
2554  throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
2555  }
2556 
2557  if (m_Parameters.m_End.size() != rank)
2558  {
2559  throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
2560  }
2561 
2562  if (m_Parameters.m_Stride.size() != rank)
2563  {
2564  throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
2565  }
2566 
2567  // Stride entries must be non-zero
2568  for (auto& stride : m_Parameters.m_Stride)
2569  {
2570  if (stride == 0)
2571  {
2572  throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2573  }
2574  }
2575 }
2576 
// Validates an element-wise Minimum workload: two inputs, one output, all of a
// supported data type, with broadcast-compatible shapes.
void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MinimumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    // Inputs may differ in shape if they broadcast to the output shape.
    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
2610 
// Validates a Debug workload. Debug is a pass-through layer, so only the
// input/output counts are checked - no type or shape constraints apply.
void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DebugQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);
}
2618 
2619 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2620 {
2621  const std::string descriptorName{"EqualQueueDescriptor"};
2622 
2623  ValidateNumInputs(workloadInfo, descriptorName, 2);
2624  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2625 
2626  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2627  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2628  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2629 
2630  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2631  inputTensorInfo1,
2632  outputTensorInfo,
2633  descriptorName,
2634  "input_0",
2635  "input_1");
2636 
2637  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2638  {
2639  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2640  }
2641 }
2642 
2643 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2644 {
2645  const std::string descriptorName{"GreaterQueueDescriptor"};
2646 
2647  ValidateNumInputs(workloadInfo, descriptorName, 2);
2648  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2649 
2650  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2651  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2652  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2653 
2654  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2655  inputTensorInfo1,
2656  outputTensorInfo,
2657  descriptorName,
2658  "input_0",
2659  "input_1");
2660 
2661  if (outputTensorInfo.GetDataType() != DataType::Boolean)
2662  {
2663  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2664  }
2665 }
2666 
// Validates an Rsqrt (reciprocal square root) workload: one input and one
// output with identical shapes and matching, supported data types.
void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"RsqrtQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Element-wise op: shapes must match exactly.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
2692 
// Validates a Gather workload: a data input, an Int32 indices input, and an
// output whose rank is rank(input) + rank(indices) - 1 and whose data type
// matches the data input.
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GatherQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // Second input carries the gather indices and must be Signed32.
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    if (indicesTensorInfo.GetDataType() != DataType::Signed32)
    {
        throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
    }

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes =
    {
        // NOTE(review): the DataType entries are elided in this view of the
        // file - confirm the list against the full source.
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Gather replaces the indexed axis of the input with the indices' shape.
    unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
2727 
{
    // Validates a DetectionPostProcess workload: two inputs (box encodings, scores),
    // four outputs (boxes, classes, scores, num detections) plus the mandatory anchors tensor.
    const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);

    // Exactly four outputs are required; checked by hand so the count can be reported.
    if (workloadInfo.m_OutputTensorInfos.size() != 4)
    {
        throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
    }

    // Anchors are supplied on the descriptor, not as a workload input.
    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo =  workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo       =  workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();

    const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];

    // Expected ranks: box encodings and scores are 3D, anchors 2D.
    ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");

    // Data types accepted for the three input-side tensors.
    // NOTE(review): list entries are not visible in this extract — verify against the source.
    const std::vector<DataType> supportedInputTypes =
    {
    };

    ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);

    // Expected ranks of the four outputs.
    ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");

    // NOTE: Output is always Float32 regardless of input type
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");

    // The NMS IoU threshold must lie in (0, 1].
    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    // The scores tensor's last dimension must account for the background class: numClasses + 1.
    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException(descriptorName + ": Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
2795 
void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"DequantizeQueueDescriptor"};

    // Dequantize is elementwise: one quantized input, one dequantized output.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The input must carry a quantized data type; anything else cannot be dequantized.
    if (!IsQuantizedType(inputTensorInfo.GetDataType()))
    {
        throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
    }

    // Types permitted for the (dequantized) output tensor.
    // NOTE(review): list entries are not visible in this extract — verify against the source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
2820 
2821 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2822 {
2823  const std::string& descriptorName{"MergeQueueDescriptor"};
2824 
2825  ValidateNumInputs(workloadInfo, descriptorName, 2);
2826  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2827 
2828  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2829  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2830  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2831 
2832  ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2833  ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2834 
2835  ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2836  ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2837 }
2838 
void ShapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"ShapeQueueDescriptor"};

    // Shape takes one input tensor and produces one output tensor.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Types permitted for the input tensor.
    // NOTE(review): list entries are not visible in this extract — verify against the source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    // The output type is restricted to Signed32.
    ValidateDataTypes(outputTensorInfo, {DataType::Signed32}, descriptorName);
}
2865 
void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string& descriptorName{"SwitchQueueDescriptor"};

    // Switch requires two inputs and two outputs.
    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];

    const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];

    // All four tensors must use one of the supported types.
    // NOTE(review): list entries are not visible in this extract — verify against the source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);

    // Both outputs must have the same shape as input_0 (input_1's shape is not checked here).
    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo0,
                              descriptorName,
                              "input_0",
                              "output_0");

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo1,
                              descriptorName,
                              "input_0",
                              "output_1");
}
2906 
2907 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
2908 {
2909  // This is internally generated so it should not need validation.
2910 }
2911 
2912 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2913 {
2914  const std::string& descriptorName{"PreluQueueDescriptor"};
2915 
2916  ValidateNumInputs(workloadInfo, descriptorName, 2);
2917  ValidateNumOutputs(workloadInfo, descriptorName, 1);
2918 
2919  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2920  const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2921  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2922 
2923  std::vector<DataType> supportedTypes
2924  {
2931  };
2932 
2933  ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2934  ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2935 
2936  ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2937 
2938  ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
2939  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");
2940 
2941  ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2942  alphaTensorInfo,
2943  outputTensorInfo,
2944  descriptorName,
2945  "input",
2946  "alpha");
2947 }
2948 
{
    // Validates a 2D transpose (de-)convolution workload: one 4D input, one 4D output,
    // mandatory 4D weights and an optional bias, including per-axis quantization checks.
    const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Input and output must both be 4-dimensional.
    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    // Weights are mandatory and supplied on the descriptor.
    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    Optional<TensorInfo> optionalBiasTensorInfo;
    // Bias is only validated (and required) when the descriptor enables it.
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        // Bias type is derived from the input type (e.g. Signed32 for quantized inputs).
        ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    // Data types accepted for the input tensor.
    // NOTE(review): list entries are not visible in this extract — verify against the source.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3000 
3001 void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3002 {
3003  const std::string descriptorName{"TransposeQueueDescriptor"};
3004 
3005  ValidateNumInputs(workloadInfo, descriptorName, 1);
3006  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3007 
3008  const PermutationVector& mapping = m_Parameters.m_DimMappings;
3009 
3010  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3011  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3012 
3013  ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
3014  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
3015 
3016  for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
3017  {
3018  if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
3019  {
3020  throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
3021  " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
3022  "must match dst dimension " + to_string(i) +
3023  " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
3024  }
3025  }
3026 
3027  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3028 }
3029 
{
    // NOTE(review): this descriptor name duplicates "TransposeQueueDescriptor" even though
    // TransposeQueueDescriptor::Validate is defined separately above — this looks like a
    // copy-paste slip; confirm the intended descriptor name for this workload against the
    // function signature in the original source (not visible in this extract).
    const std::string descriptorName{"TransposeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Only data types are checked here; shapes are not validated in this function.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3042 
// Validates a QLSTM workload: 3 inputs (input, outputStateIn, cellStateIn), 3 outputs
// (outputStateOut, cellStateOut, output), the mandatory gate weights/biases, and the
// optional CIFG, peephole, layer-normalization and projection parameter sets.
void QLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QLstmQueueDescriptor"};

    // Validate number of inputs/outputs
    ValidateNumInputs(workloadInfo, descriptorName, 3);
    ValidateNumOutputs(workloadInfo, descriptorName, 3);

    // Input/output tensor info
    auto inputInfo = workloadInfo.m_InputTensorInfos[0];
    auto outputStateInInfo = workloadInfo.m_InputTensorInfos[1];
    auto cellStateInInfo = workloadInfo.m_InputTensorInfos[2];

    auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
    auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
    auto outputInfo = workloadInfo.m_OutputTensorInfos[2];

    // Supported types for various tensors in QLSTM
    // NOTE(review): the type-list entries below are not visible in this extract —
    // verify their contents against the original source.
    std::vector<DataType> inputOutputSupportedTypes =
    {
    };

    std::vector<DataType> cellStateSupportedTypes =
    {
    };

    std::vector<DataType> weightsSupportedTypes =
    {
    };

    std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
    {
    };

    std::vector<DataType> biasSupportedTypes =
    {
    };

    // Validate types of input/output tensors
    ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);

    ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);

    // Validate matching types of input/output tensors
    ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
                                 "outputStateIn", "outputStateOut");
    ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");

    // Infer number of batches, number of units, input size and output size from tensor dimensions
    const uint32_t numBatches = inputInfo.GetShape()[0];
    const uint32_t inputSize = inputInfo.GetShape()[1];
    const uint32_t outputSize = outputStateInInfo.GetShape()[1];
    const uint32_t numUnits = cellStateInInfo.GetShape()[1];

    // Validate number of dimensions and number of elements for input/output tensors
    ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
    ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
    ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName + " cellStateIn");

    ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
    ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName + " cellStateOut");
    ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName + " output");

    // Validate number of dimensions and number of elements for MANDATORY weight tensors
    ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
    auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize), " InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
    auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize), " InputToCellWeights");

    ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
    auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize), " InputToOutputWeights");

    ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
    auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
                                " RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
    auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");

    ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
    auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
    // NOTE(review): the label below says " RecurrentToCellWeights" but this call validates
    // RecurrentToOutputWeights — the error-message name looks like a copy-paste slip.
    ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize), " RecurrentToCellWeights");

    // Validate data types for MANDATORY weights tensors (all should match each other)
    ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "inputToCellWeights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "inputToOutputWeights");

    // NOTE(review): "recurrentToForgeteights" below is a typo ("recurrentToForgetWeights")
    // in the reported tensor name.
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToForgeteights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToCellWeights");
    ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
                                 "inputToForgetWeights", "recurrentToOutputWeights");

    // Validate number of dimensions and number of elements for MANDATORY bias tensors
    ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
    auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits, " ForgetGateBias");

    ValidatePointer(m_CellBias, descriptorName, "CellBias");
    auto cellBiasInfo = m_CellBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits, " CellBias");

    ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
    auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits, " OutputGateBias");

    // Validate data types for MANDATORY bias tensors
    ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
                                 "forgetGateBias", "cellBias");
    ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
                                 "forgetGateBias", "outputGateBias");

    // Validate OPTIONAL params: CIFG (inputToInputWeights, recurrentToInputWeights, inputGateBias)
    // All three must be present when CIFG is disabled, and all absent when it is enabled.
    const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
                                             !m_Parameters.m_CifgEnabled) ||
                                            (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
                                             !m_InputGateBias && m_Parameters.m_CifgEnabled));

    if (!allCifgParamsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present "
            "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be "
            "set appropriately.");
    }

    if (!m_Parameters.m_CifgEnabled)
    {
        // Validate number of dimensions and number of elements
        auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize), " InputToInputWeights");

        auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
                                    " RecurrentToInputWeights");

        auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
        ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits, " InputGateBias");

        // Validate data types
        ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
                                     "inputToForgetWeights", "inputToInputWeights");
        ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
                                     "inputToForgetWeights", "recurrentToInputWeights");
        ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
                                     "forgetGateBias", "inputGateBias");
    }

    // Validate OPTIONAL params: Peephole (cellToInputWeights, cellToForgetWeights, cellToOutputWeights)
    // CellToInputWeights is excused from the "all present" requirement when CIFG is enabled.
    bool allPeepholeWeightsPresentOrNot =
        (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
          && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
         || (!m_CellToInputWeights && !m_CellToForgetWeights
             && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));

    if (!allPeepholeWeightsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole "
            "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present "
            "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set "
            "appropriately.");
    }

    if (m_Parameters.m_PeepholeEnabled)
    {
        auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits, " cellToForgetWeights");
        ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);

        auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits, " cellToOutputWeights");
        ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
                                     "cellToForgetWeight", "cellToOutputWeights");

        if (!m_Parameters.m_CifgEnabled)
        {
            auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
            ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits, " cellToInputWeights");
            ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
                                         "cellToForgetWeights", "cellToInputWeights");
        }
    }

    // Validate OPTIONAL params: Layer Norm Weights
    // InputLayerNormWeights is excused from the "all present" requirement when CIFG is enabled.
    bool allLayerNormWeightsPresentOrNot =
        (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
          && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
         || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
             && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));

    if (!allLayerNormWeightsPresentOrNot)
    {
        throw InvalidArgumentException(descriptorName +
            ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights "
            "and CellLayerNormWeights should all be present (Layer Norm enabled) or not "
            "be present at all (Layer Norm disabled). InputLayerNormWeights should "
            "only be present when Layer Norm is enabled and CIFG is disabled. "
            "m_Parameters.m_LayerNormEnabled should be set appropriately.");
    }

    if (m_Parameters.m_LayerNormEnabled)
    {
        auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits, " forgetLayerNormWeights");
        ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);

        auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits, " cellLayerNormWeights");
        ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
                                     "forgetLayerNormWeights", "cellLayerNormWeights");

        auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits, " outputLayerNormWeights");
        ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
                                     "forgetLayerNormWeights", "outputLayerNormWeights");

        if (!m_Parameters.m_CifgEnabled)
        {
            auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
            ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits, " inputLayerNormWeights");
            ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
                                         "forgetLayerNormWeights", "inputLayerNormWeights");
        }
    }

    // Validate OPTIONAL params: Projection (projectionWeights, projectionBias)
    // Weights are mandatory when projection is enabled; the bias is always optional.
    bool correctProjectionTensorsPresent =
        ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
         (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
         (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));

    if (!correctProjectionTensorsPresent)
    {
        throw InvalidArgumentException(descriptorName +
            ": If projection is enabled, ProjectionWeights should be present and "
            "ProjectionBias is optional. If projection is disabled, neither "
            "ProjectionWeights nor ProjectionBias should be present.");
    }

    if (m_Parameters.m_ProjectionEnabled)
    {
        auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
        ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize), "ProjectionWeights");
        ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);

        if (m_ProjectionBias)
        {
            auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
            ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize, "ProjectionBias");
            ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
        }

    }
    // NOTE(review): this throws only when BOTH the scale AND the zero point differ from the
    // hidden-state parameters, although the message implies either mismatch is invalid —
    // confirm whether '&&' was intended rather than '||'.
    else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
             outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
        throw InvalidArgumentException(descriptorName +
            ": If projection is disabled, output quantization info (scale, offset) "
            "should match HiddenStateScale and HiddenStateZeroPoint.");
    }

}
3328 
3330 {
3331  const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
3332 
3333  // Validate number of inputs/outputs
3334  ValidateNumInputs(workloadInfo, descriptorName, 3);
3335  ValidateNumOutputs(workloadInfo, descriptorName, 2);
3336 
3337  // Input/output tensor infos
3338  auto inputInfo = workloadInfo.m_InputTensorInfos[0];
3339  auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
3340  auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
3341 
3342  auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
3343  auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
3344 
3345  std::vector<DataType> inputOutputSupportedTypes =
3346  {
3348  };
3349 
3350  std::vector<DataType> cellStateSupportedTypes =
3351  {
3353  };
3354 
3355  std::vector<DataType> weightsSupportedTypes =
3356  {
3358  };
3359 
3360  std::vector<DataType> biasSupportedTypes =
3361  {
3363  };
3364 
3365  // Validate types of input/output tensors
3366  ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3367  ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3368  ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3369 
3370  ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3371  ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
3372 
3373  // Validate matching types of input/output tensors
3374  ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3375  ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3376  "outputStateIn", "outputStateOut");
3377  ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3378 
3379  // Validate matching quantization info for input/output tensors
3380  ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
3381  ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
3382  ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
3383 
3384  // Infer number of batches, input size and output size from tensor dimensions
3385  const uint32_t numBatches = inputInfo.GetShape()[0];
3386  const uint32_t inputSize = inputInfo.GetShape()[1];
3387  const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3388 
3389  // Validate number of dimensions and number of elements for input/output tensors
3390  ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
3391  ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
3392  ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
3393  ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
3394  ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
3395 
3396  // Validate number of dimensions and number of elements for weights tensors
3397  ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
3398  auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3399  ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
3400 
3401  ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
3402  auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3403  ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
3404 
3405  ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
3406  auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3407  ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
3408 
3409  ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
3410  auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3411  ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
3412 
3413  ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
3414  auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3415  ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
3416 
3417  ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
3418  auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3419  ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3420  " RecurrentToForgetWeights");
3421 
3422  ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
3423  auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3424  ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3425 
3426  ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
3427  auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3428  ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
3429 
3430  // Validate data types for weights tensors (all should match each other)
3431  ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3432 
3433  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3434  "inputToInputWeights", "inputToForgetWeights");
3435  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3436  "inputToInputWeights", "inputToCellWeights");
3437  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3438  "inputToInputWeights", "inputToOutputWeights");
3439 
3440  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3441  "inputToInputWeights", "recurrentToInputWeights");
3442  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3443  "inputToInputWeights", "recurrentToForgeteights");
3444  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3445  "inputToInputWeights", "recurrentToCellWeights");
3446  ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3447  "inputToInputWeights", "recurrentToOutputWeights");
3448 
3449  // Validate matching quantization info for weight tensors (all should match each other)
3450  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3451  descriptorName, "inputToInputWeights", "inputToForgetWeights");
3452  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3453  descriptorName, "inputToInputWeights", "inputToCellWeights");
3454  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3455  descriptorName, "inputToInputWeights", "inputToOutputWeights");
3456 
3457  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3458  descriptorName, "inputToInputWeights", "recurrentToInputWeights");
3459  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3460  descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
3461  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3462  descriptorName, "inputToInputWeights", "recurrentToCellWeights");
3463  ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3464  descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
3465 
3466  // Validate number of dimensions and number of elements in bias tensors
3467  ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
3468  auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3469  ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
3470 
3471  ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
3472  auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3473  ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
3474 
3475  ValidatePointer(m_CellBias, descriptorName, "CellBias");
3476  auto cellBiasInfo = m_CellBias->GetTensorInfo();
3477  ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
3478 
3479  ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
3480  auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3481  ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
3482 
3483  // Validate data types for bias tensors (all should match each other)
3484  ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3485 
3486  ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3487  "inputGateBias", "forgetGateBias");
3488  ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3489  "inputGateBias", "cellBias");
3490  ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3491  "inputGateBias", "outputGateBias");
3492 
3493  // Validate bias tensor quantization info
3494  ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3495  ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3496  ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3497  ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3498 }
3499 
/// Validates the workload for the elementwise absolute-value operation.
/// Checks: exactly one input and one output, identical shapes, a supported
/// input data type, and matching input/output data types.
/// @throws InvalidArgumentException on any violation.
void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"AbsQueueDescriptor"};

    // Abs is unary: exactly one input tensor and one output tensor.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Elementwise op: output shape must be identical to the input shape.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // NOTE(review): the DataType entries of this initializer are elided in this
    // rendering of the source (original lines 3514-3520) — confirm against the repo.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    // Output must carry the same data type as the input.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3526 
3527 void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3528 {
3529  const std::string descriptorName{"SliceQueueDescriptor"};
3530 
3531  ValidateNumInputs(workloadInfo, descriptorName, 1);
3532  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3533 
3534  const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
3535  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3536 
3537  ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
3538 
3539  const unsigned int rank = inputTensorInfo.GetNumDimensions();
3540  if (rank > 4)
3541  {
3542  throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
3543  }
3544 
3545  ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");
3546 
3547  // Check if m_Begin and m_Size have the expected length
3548  if (m_Parameters.m_Begin.size() != rank)
3549  {
3550  throw InvalidArgumentException(descriptorName +
3551  ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3552  }
3553  if (m_Parameters.m_Size.size() != rank)
3554  {
3555  throw InvalidArgumentException(descriptorName +
3556  ": Length of size descriptor must equal rank " + std::to_string(rank));
3557  }
3558 
3559  // Check if the shape of the output tensor matches m_Size
3560  const TensorShape& outputShape = outputTensorInfo.GetShape();
3561  for (unsigned int i = 0u; i < rank; ++i)
3562  {
3563  if (m_Parameters.m_Size[i] != outputShape[i])
3564  {
3565  throw InvalidArgumentException(descriptorName + ": Size descriptor does not match output tensor.");
3566  }
3567  }
3568 
3569  // Check if the sum of begin offset and size in a given dimension
3570  // does not exceed the size of corresponding input
3571  const TensorShape& inputShape = inputTensorInfo.GetShape();
3572  for(unsigned int i = 0u; i < rank; ++i)
3573  {
3574  if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3575  {
3576  throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
3577  std::to_string(i) + " exceeds input size.");
3578  }
3579  }
3580 }
3581 
{
    const std::string descriptorName{"DepthToSpaceQueueDescriptor"};

    // DepthToSpace is unary: one input, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputInfo = workloadInfo.m_OutputTensorInfos[0];

    // Operation is defined on 4D tensors only (NCHW or NHWC).
    ValidateTensorNumDimensions(inputInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");

    // NOTE(review): the DataType entries of this initializer are elided in this
    // rendering of the source (original lines 3597-3602) — confirm against the repo.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputInfo, supportedTypes, descriptorName);

    // DepthToSpace only rearranges data, so total element counts must agree.
    ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");

    if (m_Parameters.m_BlockSize == 0)
    {
        throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
    }

    // Resolve the W/H/C axis positions for the configured data layout.
    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int wIndex = dimensionIndices.GetWidthIndex();
    const unsigned int hIndex = dimensionIndices.GetHeightIndex();
    const unsigned int cIndex = dimensionIndices.GetChannelsIndex();

    // Output spatial dims must be multiples of the block size.
    const TensorShape& outputShape = outputInfo.GetShape();
    if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
    {
        // NOTE(review): the adjacent string literals concatenate without a space,
        // so the emitted message reads "...shapemust be divisible..." — fix candidate.
        throw InvalidArgumentException(descriptorName + ": Output width and height shape"
                                       "must be divisible by block size.");
    }

    // Input channel count must be a multiple of blockSize^2 (each block of
    // channels is redistributed into a blockSize x blockSize spatial patch).
    const TensorShape& inputShape = inputInfo.GetShape();
    if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
    {
        // NOTE(review): same missing space between "tensor" and "must" in the message.
        throw InvalidArgumentException(descriptorName + ": The depth of the input tensor"
                                       "must be divisible by the square of block size." );
    }
}
3634 
3635 void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
3636 {
3637  const std::string descriptorName{"ComparisonQueueDescriptor"};
3638 
3639  ValidateNumInputs(workloadInfo, descriptorName, 2);
3640  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3641 
3642  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3643  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3644  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3645 
3646  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3647  inputTensorInfo1,
3648  outputTensorInfo,
3649  descriptorName,
3650  "input_0",
3651  "input_1");
3652 
3653  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3654  {
3655  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3656  }
3657 }
3658 
{
    const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};

    // Unary op: one input, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Elementwise op: output shape must equal the input shape.
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // NOTE(review): the DataType entries of this initializer are elided in this
    // rendering of the source (original lines 3673-3679) — confirm against the repo.
    std::vector<DataType> supportedTypes =
    {
    };

    // NOTE(review): entry elided here too (original line 3684); LogicalNot plausibly
    // accepts only Boolean input — verify against the repository.
    std::vector<DataType> logicalSupportedTypes =
    {
    };

    // LogicalNot has its own, narrower set of permitted input types.
    if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
    {
        ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
    }
    else
    {
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    }


    // Output must carry the same data type as the input.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3699 
/// Validates the workload for the Rank operation, which produces the number of
/// dimensions of the input tensor as a single Signed32 value.
/// @throws InvalidArgumentException on any violation.
void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"RankQueueDescriptor"};

    // Rank is unary: one input, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // The output is a 1D tensor holding exactly one element (the rank).
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
    ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");

    // NOTE(review): the DataType entries of this initializer are elided in this
    // rendering of the source (original lines 3715-3722) — confirm against the repo.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    // The rank value itself is always emitted as Signed32.
    ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
}
3728 
3730 {
3731  const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
3732 
3733  ValidateNumInputs(workloadInfo, descriptorName, 2);
3734  ValidateNumOutputs(workloadInfo, descriptorName, 1);
3735 
3736  const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
3737  const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
3738  const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
3739 
3740  ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3741  inputTensorInfo1,
3742  outputTensorInfo,
3743  descriptorName,
3744  "input_0",
3745  "input_1");
3746 
3747  if (inputTensorInfo0.GetDataType() != DataType::Boolean)
3748  {
3749  throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
3750  }
3751 
3752  if (inputTensorInfo1.GetDataType() != DataType::Boolean)
3753  {
3754  throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
3755  }
3756 
3757  if (outputTensorInfo.GetDataType() != DataType::Boolean)
3758  {
3759  throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
3760  }
3761 }
3762 
/// Validates the workload for Reduce operations (Sum, Mean, Max, ...).
/// Checks input/output counts, a supported input data type, and matching
/// input/output data types. The reduced output shape is not validated here.
/// @throws InvalidArgumentException on any violation.
void ReduceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ReduceQueueDescriptor"};

    // Reduce is unary: one input, one output.
    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // NOTE(review): the DataType entries of this initializer are elided in this
    // rendering of the source (original lines 3775-3781) — confirm against the repo.
    std::vector<DataType> supportedTypes =
    {
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    // Output must carry the same data type as the input.
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
3787 
{
    // Modified from LstmQueueDescriptor::Validate to support UnidirectionalSequenceLstm

    const std::string descriptorName{"UnidirectionalSequenceLstmQueueDescriptor"};

    // check dimensions of all inputs and outputs
    // Expected inputs: [0] input sequence, [1] outputStateIn, [2] cellStateIn;
    // one output: the output sequence.
    if (workloadInfo.m_InputTensorInfos.size() != 3)
    {
        throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
    }

    // NOTE(review): the DataType entry/entries of this initializer are elided in
    // this rendering of the source (original line 3806) — confirm against the repo.
    std::vector<DataType> supportedTypes =
    {
    };

    // check for supported type of one input and match them with all the other input and output
    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // type matches all other inputs
    for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }
    // type matches all other outputs
    for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        // NOTE(review): the hard-coded "LstmQueueDescriptor" literal looks like a
        // copy-paste leftover from LstmQueueDescriptor::Validate; passing
        // descriptorName would produce a correctly-attributed error message.
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_OutputTensorInfos[i],
                                     "LstmQueueDescriptor",
                                     "input_0",
                                     "output_" + std::to_string(i));
    }

    // Making sure clipping parameters have valid values.
    // == 0 means no clipping
    //  > 0 means clipping
    if (m_Parameters.m_ClippingThresCell < 0.0f)
    {
        throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
    }
    if (m_Parameters.m_ClippingThresProj < 0.0f)
    {
        throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
    }

    // Axis positions in input_0 depend on layout: batch-major is
    // [batch, time, input], time-major is [time, batch, input]. The feature
    // dimension is always the innermost one (index 2).
    unsigned int batchIndx = 0;
    unsigned int inputIndx = 1;
    uint32_t timeStep = 1;
    unsigned int timeIndx = 1;
    inputIndx = 2; // overrides the initial value above; features are always dim 2
    if (m_Parameters.m_TimeMajor)
    {
        batchIndx = 1;
        timeIndx = 0;

    }
    timeStep = workloadInfo.m_InputTensorInfos[0].GetShape()[timeIndx];

    // Inferring batch size, number of outputs and number of cells from the inputs.
    const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[inputIndx];
    const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[batchIndx];
    ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
    const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
    ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
    const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];

    // input tensor
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 3, (timeStep * n_batch * n_input),
                                descriptorName + " input_0");
    // outputStateInTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
                                descriptorName + " input_1");
    // cellStateInTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
                                descriptorName + " input_2");

    // outputTensor
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 3, (timeStep * n_batch * n_output),
                                descriptorName + " output_0");

    // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
    if ( m_InputToInputWeights )
    {
        // NOTE(review): the label "InputLayerNormWeights" appears to be a copy-paste
        // error — this call validates InputToInputWeights.
        ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
                                    (n_cell * n_input), "InputLayerNormWeights");
    }

    ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
    ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
                                (n_cell * n_input), "InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
    ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
                                (n_cell * n_input), "InputToCellWeights");

    if ( m_RecurrentToInputWeights )
    {
        ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
                                    (n_cell * n_output), "RecurrentToInputWeights");
    }

    ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
    ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
                                (n_cell * n_output), "RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
    ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
                                (n_cell * n_output), "RecurrentToCellWeights");

    // Make sure the input-gate's parameters are either both present (regular
    // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
    bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
                                     !m_Parameters.m_CifgEnabled) ||
                                     (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
                                     m_Parameters.m_CifgEnabled));
    if (!cifg_weights_all_or_none)
    {
        throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
                                       "RecurrentToInputWeights must either both be present (regular LSTM) "
                                       "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
                                       "accordingly.");
    }

    // Peephole weights are optional 1D vectors of size n_cell.
    if ( m_CellToInputWeights )
    {
        ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToInputWeights");
    }
    if ( m_CellToForgetWeights )
    {
        ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToForgetWeights");
    }
    if ( m_CellToOutputWeights )
    {
        ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToOutputWeights");
    }

    // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
    // (CellToInputWeights is exempt when CIFG is enabled, as the input gate is absent.)
    bool peephole_weights_all_or_none =
        (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
        && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
        || ( !m_CellToInputWeights && !m_CellToForgetWeights
        && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
    if (!peephole_weights_all_or_none)
    {
        throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
    }

    // Make sure the input gate bias is present only when not a CIFG-LSTM.
    if (m_Parameters.m_CifgEnabled)
    {
        if (m_InputGateBias)
        {
            throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
        }
    }
    else
    {
        if (!m_InputGateBias)
        {
            throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
                                           "must be present.");
        }
        ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
                                    n_cell, "InputGateBias");
    }

    // The remaining gate biases are mandatory 1D vectors of size n_cell.
    ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
    ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");

    ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
    ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");

    ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
    ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");

    // Projection layer tensors (optional).
    if (m_ProjectionWeights)
    {
        ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
                                    (n_cell * n_output), "ProjectionWeights");
    }
    if (m_ProjectionBias)
    {
        ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
    }

    // Making sure the projection tensors are consistent:
    // 1) If projection weight is not present, then projection bias should not be
    // present.
    // 2) If projection weight is present, then projection bias is optional.
    bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
                                        !m_Parameters.m_ProjectionEnabled)
                                        || (m_ProjectionWeights && !m_ProjectionBias &&
                                        m_Parameters.m_ProjectionEnabled)
                                        || (m_ProjectionWeights && m_ProjectionBias &&
                                        m_Parameters.m_ProjectionEnabled));
    if (!projecton_tensors_consistent)
    {
        throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
    }

    // The four layer normalization weights either all have values or none of them have values. Additionally, if
    // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
    // either all have values or none of them have values. Layer normalization is used when the values of all the
    // layer normalization weights are present
    if (m_InputLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
    }
    if (m_ForgetLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
    }
    if (m_CellLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
    }
    if (m_OutputLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
    }

    if (m_Parameters.m_LayerNormEnabled)
    {
        // With layer norm on, InputLayerNormWeights is required unless CIFG removes the input gate.
        if (!m_Parameters.m_CifgEnabled)
        {
            if (!m_InputLayerNormWeights)
            {
                throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
                                               "disabled but InputLayerNormWeights are not present");
            }
            ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
                                        1, n_cell, "InputLayerNormWeights");
        }
        else if (m_InputLayerNormWeights)
        {
            throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
                                           "enabled");
        }

        // The other three layer-norm weight vectors are mandatory when layer norm is enabled.
        ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "ForgetLayerNormWeights");
        ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");

        ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "OutputLayerNormWeights");
        ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");

        ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "CellLayerNormWeights");
        ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
    }
    else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
    {
        throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
                                       "normalisation weights are present.");
    }
}
4058 
4059 
4060 } // namespace armnn
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
Definition: Tensor.cpp:434
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:280
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:448
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:496
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2021 ARM Limited and Contributors.
void Validate(const WorkloadInfo &workloadInfo) const
SizeType GetSize() const
Definition: Types.hpp:311
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:453
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:201
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
Definition: TypesUtils.hpp:202
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
DataType
Definition: Types.hpp:35
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
Definition: Tensor.hpp:198
bool has_value() const noexcept
Definition: Optional.hpp:53
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define CHECK_LOCATION()
Definition: Exceptions.hpp:209
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about TensorInfos of a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
unsigned int GetChannelsIndex() const
bool IsQuantized() const
Definition: Tensor.cpp:506
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin