16 #include <boost/format.hpp> 17 #include <boost/numeric/conversion/cast.hpp> 27 switch (inputDataType)
29 case DataType::Float16:
30 return DataType::Float16;
32 case DataType::Float32:
33 return DataType::Float32;
34 case DataType::QAsymmS8:
35 return DataType::Signed32;
36 case DataType::QAsymmU8:
37 return DataType::Signed32;
38 case DataType::QSymmS8:
39 return DataType::Signed32;
40 case DataType::QSymmS16:
41 return DataType::Signed32;
44 return DataType::Float32;
/// Converts any streamable value to its textual representation.
/// Local replacement for std::to_string that works for every type
/// with an operator<< overload (used by the validation messages below).
/// @param value the value to format.
/// @return the value formatted via std::ostringstream.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}
62 void ValidatePointer(
const void* ptr, std::string
const& descName, std::string
const& paramName)
67 paramName +
" parameter must be set.");
72 void ValidateTensorShapesMatch(
const TensorInfo& first,
74 std::string
const& descName,
75 std::string
const& firstName,
76 std::string
const& secondName)
81 + firstName +
" & " + secondName +
" must have identical shapes");
86 void ValidateNumInputs(
const WorkloadInfo& workloadInfo, std::string
const& descName,
const unsigned int expectedSize)
91 ": Requires exactly " + to_string(expectedSize) +
"input(s). " +
97 void ValidateNumOutputs(
const WorkloadInfo& workloadInfo, std::string
const& descName,
const unsigned int expectedSize)
102 ": Requires exactly " + to_string(expectedSize) +
" output(s). " +
108 void ValidateTensorNumDimensions(
const TensorInfo& tensor,
109 std::string
const& descName,
110 unsigned int numDimensions,
111 std::string
const& tensorName)
117 tensorName +
" tensor.");
122 void ValidateTensorNumElements(
const TensorInfo& tensor,
123 std::string
const& descName,
124 unsigned int numElements,
125 std::string
const& tensorName)
131 tensorName +
" tensor.");
136 void ValidateTensorNumDimNumElem(
const TensorInfo& tensorInfo,
137 unsigned int numDimension,
138 unsigned int numElements,
139 std::string
const& tensorName)
141 const std::string functionName{
"ValidateTensorNumDimNumElem"};
142 ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
143 ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
148 const std::string& descName, std::string
const& tensorName)
157 void ValidPerAxisQuantizedDataType(
const TensorInfo& tensor,
const std::string& descName,
const std::string& tensorName)
161 tensor.
GetDataType() != DataType::QuantizedSymm8PerAxis)
164 ": Expected data type which supports per-axis quantization scheme but got " +
171 void ValidateTensorQuantizationSpace(
const TensorInfo& first,
173 const std::string& descName,
174 std::string
const& firstName,
175 std::string
const& secondName)
187 if (firstDataType != secondDataType)
190 " must be of the same quantized type, " +
198 " must have the same quantization space, " +
207 void ValidateBiasTensorQuantization(
const TensorInfo& biasTensor,
210 const std::string& descName)
213 auto VerifyBiasQuantizationScale = [&descName](
float biasScale,
float expectedScale) ->
void 215 constexpr
float tolerance = 0.000001f;
216 if (std::abs(biasScale - expectedScale) > tolerance)
219 std::stringstream msg;
220 msg << std::setprecision(10) << descName <<
": Expected " << expectedScale <<
221 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
239 if (weightScales.size() != biasScales.size())
241 std::stringstream msg;
242 msg << descName <<
": Expected matchhing number of per-axis quantization scales, but got different " 243 <<
"values: weights=" << weightScales.size() <<
", biases=" << biasScales.size();
247 for (
size_t i = 0ul; i < biasScales.size(); ++i)
250 VerifyBiasQuantizationScale(biasScales[i], expectedScale);
262 void ValidateTensors(
const std::vector<ITensorHandle*>& vec,
263 unsigned int numExpected,
264 const std::string& descName,
265 const std::string& varName)
267 if (vec.empty() && numExpected > 0)
272 for (
unsigned int i = 0; i < numExpected; ++i)
282 void ValidateBroadcastTensorShapesMatch(
const TensorInfo& first,
285 std::string
const& descName,
286 std::string
const& firstName,
287 std::string
const& secondName)
294 + firstName +
" & " + secondName
295 +
" must have the same number of dimensions in order to be broadcasted");
298 std::vector<uint32_t> outputDims(numDims, 0u);
299 for (uint32_t i = 0; i < numDims; i++)
302 const bool dimsNotOne = (first.
GetShape()[i] != 1) && (second.
GetShape()[i] != 1);
303 if (dimsNotEqual && dimsNotOne)
309 TensorShape broadcastShape =
TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
310 if (broadcastShape != output.
GetShape())
313 + firstName +
" & " + secondName
314 +
" does not match the output shape");
320 const std::vector<armnn::DataType>& supportedTypes,
321 std::string
const& descName)
323 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.
GetDataType());
324 if (iterator == supportedTypes.end())
331 void ValidateTensorDataTypesMatch(
const TensorInfo& first,
333 std::string
const& descName,
334 std::string
const& firstName,
335 std::string
const& secondName)
340 " must have identical data types.");
345 void ValidateTensorNumElementsMatch(
const TensorInfo& first,
347 std::string
const& descName,
348 std::string
const& firstName,
349 std::string
const& secondName)
354 " must have the same number of elements.");
358 void ValidateWeightDataType(
const TensorInfo& inputInfo,
360 const std::string& descName)
366 const std::vector<DataType> validTypes =
371 DataType::QuantizedSymm8PerAxis
375 ValidateDataTypes(weightInfo, validTypes, descName);
379 ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName,
"input",
"weight");
383 void ValidatePerAxisQuantizationDimension(
const TensorInfo& tensorInfo,
384 const std::string& descName,
385 const std::string& tensorName)
391 boost::format(
"%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
392 % descName % tensorName));
395 if (quantizationDim.
value() != 0)
398 boost::format(
"%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, " 399 "but got: %3%") % descName % tensorName % quantizationDim.
value()));
403 void ValidatePerAxisQuantizationOffset(
const TensorInfo& tensorInfo,
404 const std::string& descName,
405 const std::string& tensorName)
408 if (quantizationOffset != 0)
411 boost::format(
"%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, " 412 "but got: %3%") % descName % tensorName % quantizationOffset));
416 void ValidatePerAxisQuantization(
const TensorInfo& inputInfo,
420 const std::string& descName)
427 const bool canHavePerAxisQuantization = (
IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;
429 if (!canHavePerAxisQuantization)
432 boost::format(
"%1%: Per-axis quantization parameters set on tensor %2%, " 433 "but data type does not support per-axis quantization.") % descName %
"weight"));
437 ValidPerAxisQuantizedDataType(weightInfo, descName,
"weight");
438 ValidatePerAxisQuantizationDimension(weightInfo, descName,
"weight");
439 ValidatePerAxisQuantizationOffset(weightInfo, descName,
"weight");
447 boost::format(
"%1%: Per-axis quantization parameters not set on bias tensor, despite being set on " 448 "weight tensor.") % descName));
451 ValidateTensorDataType(biasInfo, DataType::Signed32, descName,
"bias");
452 ValidatePerAxisQuantizationDimension(biasInfo, descName,
"bias");
453 ValidatePerAxisQuantizationOffset(biasInfo, descName,
"bias");
461 unsigned int numExpectedIn,
unsigned int numExpectedOut)
const 463 ValidateTensors(
m_Inputs, numExpectedIn, descName,
"input");
464 ValidateTensors(
m_Outputs, numExpectedOut, descName,
"output");
470 const std::string descriptorName{
"MemCopyQueueDescriptor"};
472 ValidateNumInputs(workloadInfo, descriptorName, 1);
473 ValidateNumOutputs(workloadInfo, descriptorName , 1);
478 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
479 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
484 boost::format(
"%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
488 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
493 descriptorName % i));
499 descriptorName % i));
507 ValidateNumInputs(workloadInfo,
"MemImportQueueDescriptor", 1);
508 ValidateNumOutputs(workloadInfo,
"MemImportQueueDescriptor" , 1);
513 boost::format(
"Number of input infos (%1%) is not 1.")
521 boost::format(
"Number of input infos (%1%) does not match the number of output infos (%2%)")
531 boost::format(
"Number of elements for tensor input and output %1% does not match")
539 boost::format(
"Number of inputs (%1%) is not 1.")
546 boost::format(
"Number of inputs (%1%) does not match the number of outputs (%2%)")
550 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
567 ValidateNumInputs(workloadInfo,
"MemSyncQueueDescriptor", 1);
568 ValidateNumOutputs(workloadInfo,
"MemSyncQueueDescriptor" , 1);
573 boost::format(
"Number of inputs (%1%) is not 1.")
580 boost::format(
"Number of outputs (%1%) is not 0.")
593 const std::string descriptorName{
"ActivationQueueDescriptor"};
595 ValidateNumInputs(workloadInfo, descriptorName, 1);
596 ValidateNumOutputs(workloadInfo, descriptorName, 1);
601 std::vector<DataType> supportedTypes =
611 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
612 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
613 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
618 const std::string descriptorName{
"ArgMinMaxQueueDescriptor"};
620 ValidateNumInputs(workloadInfo, descriptorName, 1);
621 ValidateNumOutputs(workloadInfo, descriptorName, 1);
631 std::vector<DataType> supportedInputTypes =
642 ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
644 auto inputShape = inputTensorInfo.
GetShape();
645 auto outputShape = outputTensorInfo.
GetShape();
650 const std::string outputShapeError{
": Output tensor shape does not match shape inferred from input tensor."};
653 if (inputShape.GetNumDimensions() == 1)
655 if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
662 for (
unsigned int i = 0; i < unsignedAxis; ++i)
664 if (outputShape[i] != inputShape[i])
670 for (
auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
672 if (outputShape[i - 1] != inputShape[i])
682 const std::string descriptorName{
"SoftmaxQueueDescriptor"};
684 ValidateNumInputs(workloadInfo, descriptorName, 1);
685 ValidateNumOutputs(workloadInfo, descriptorName, 1);
690 std::vector<DataType> supportedTypes =
700 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
701 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
702 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
707 const std::string descriptorName{
"SplitterQueueDescriptor"};
709 ValidateNumInputs(workloadInfo, descriptorName, 1);
712 std::vector<DataType> supportedTypes =
728 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
730 const std::string outputName =
"output_" + std::to_string(i);
731 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input", outputName);
742 descriptorName +
": Number of split windows " 743 "has to match number of workloadInfo.m_OutputTensorInfos. " 744 "Number of windows: " +
745 to_string(m_ViewOrigins.size()) +
746 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.
m_OutputTensorInfos.size()));
751 for(
unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
758 "have the same dimensionality as the input tensor. " 759 "Window origin (index: " +
760 to_string(w) +
") has " + to_string(e.
m_Origin.size()) +
761 " dimensions, the input " 763 to_string(inputDims) +
" dimensions.");
765 for (
unsigned int i = 0; i < e.
m_Origin.size(); ++i)
771 "be smaller or equal than the size of the input in that coord.");
779 const std::string descriptorName{
"ConcatQueueDescriptor"};
781 ValidateNumOutputs(workloadInfo, descriptorName, 1);
801 if(m_Parameters.GetConcatAxis() > workloadInfo.
m_InputTensorInfos[0].GetShape().GetNumDimensions())
806 if (workloadInfo.
m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
814 descriptorName +
": Number of split windows " 815 "has to match number of workloadInfo.m_InputTensorInfos. " 816 "Number of windows: " +
817 to_string(m_ViewOrigins.size()) +
818 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.
m_InputTensorInfos.size()));
823 for(
unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
827 if (e.
m_Origin.size() != outputDims)
830 "have the same dimensionality as the output tensor. " 831 "Window origin (index: " +
832 to_string(w) +
") has " + to_string(e.
m_Origin.size()) +
833 " dimensions, the output " 835 to_string(outputDims) +
" dimensions.");
838 for (
unsigned int i = 0; i < e.
m_Origin.size(); ++i)
844 "be smaller or equal than the size of the output in that coord.");
850 std::vector<DataType> supportedTypes =
866 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
868 const std::string inputName =
"input_" + std::to_string(i);
869 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName,
"output");
875 const std::string descriptorName{
"StackQueueDescriptor"};
877 ValidateNumOutputs(workloadInfo, descriptorName, 1);
885 const TensorShape& inputShape = m_Parameters.m_InputShape;
904 "than the number of input dimensions.");
909 for (
unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
911 if (outputShape[i] != inputShape[i])
914 "match shape inferred from input tensor.");
918 if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
921 "match shape inferred from input tensor.");
924 for (
unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.
GetNumDimensions() + 1; ++i)
926 if (outputShape[i] != inputShape[i-1])
929 "match shape inferred from input tensor.");
939 std::vector<DataType> supportedTypes =
951 ValidateDataTypes(workloadInfo.
m_InputTensorInfos[0], supportedTypes, descriptorName);
959 "input_" + std::to_string(i));
971 const std::string descriptorName{
"FillQueueDescriptor"};
973 ValidateNumInputs(workloadInfo, descriptorName, 1);
974 ValidateNumOutputs(workloadInfo, descriptorName, 1);
979 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 1,
"input");
981 std::vector<DataType> supportedTypes =
989 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
994 const std::string descriptorName{
"FullyConnectedQueueDescriptor"};
996 ValidateNumInputs(workloadInfo, descriptorName, 1);
997 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1002 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2,
"output");
1009 ValidatePointer(m_Weight, descriptorName,
"weight");
1011 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1012 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2,
"weight");
1014 if (m_Parameters.m_BiasEnabled)
1016 ValidatePointer(m_Bias, descriptorName,
"bias");
1019 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
1020 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1023 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1,
"bias");
1027 std::vector<DataType> supportedTypes =
1037 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1045 "for BFloat16 input.");
1050 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1056 const std::string descriptorName{
"NormalizationQueueDescriptor"};
1058 ValidateNumInputs(workloadInfo, descriptorName, 1);
1059 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1065 std::vector<DataType> supportedTypes =
1075 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1077 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1079 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1084 const std::string descriptorName{
"AdditionQueueDescriptor"};
1086 ValidateNumInputs(workloadInfo, descriptorName, 2);
1087 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1093 std::vector<DataType> supportedTypes =
1104 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1105 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1106 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1108 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
1109 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName,
"input_1",
"output");
1111 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1121 const std::string descriptorName{
"MultiplicationQueueDescriptor"};
1123 ValidateNumInputs(workloadInfo, descriptorName, 2);
1124 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1130 std::vector<DataType> supportedTypes =
1141 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1142 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1143 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1145 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
1146 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName,
"input_1",
"output");
1148 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1158 const std::string descriptorName{
"BatchNormalizationQueueDescriptor"};
1160 ValidateNumInputs(workloadInfo, descriptorName, 1);
1161 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1166 std::vector<DataType> supportedTypes =
1176 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1177 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1179 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1180 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1182 ValidatePointer(m_Mean, descriptorName,
"mean");
1183 ValidatePointer(m_Variance, descriptorName,
"variance");
1184 ValidatePointer(m_Beta, descriptorName,
"beta");
1185 ValidatePointer(m_Gamma, descriptorName,
"gamma");
1187 const TensorInfo& mean = m_Mean->GetTensorInfo();
1188 const TensorInfo& variance = m_Variance->GetTensorInfo();
1189 const TensorInfo& beta = m_Beta->GetTensorInfo();
1190 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1192 ValidateTensorNumDimensions(mean, descriptorName, 1,
"mean");
1193 ValidateTensorNumDimensions(variance, descriptorName, 1,
"variance");
1194 ValidateTensorNumDimensions(beta, descriptorName, 1,
"beta");
1195 ValidateTensorNumDimensions(gamma, descriptorName, 1,
"gamma");
1197 ValidateTensorShapesMatch(mean, variance, descriptorName,
"mean",
"variance");
1198 ValidateTensorShapesMatch(mean, beta, descriptorName,
"mean",
"beta");
1199 ValidateTensorShapesMatch(mean, gamma, descriptorName,
"mean",
"gamma");
1204 const std::string descriptorName{
"Convolution2dQueueDescriptor"};
1206 ValidateNumInputs(workloadInfo, descriptorName, 1);
1207 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1212 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1213 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1215 ValidatePointer(m_Weight, descriptorName,
"weight");
1217 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1218 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
1220 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1223 if (m_Parameters.m_BiasEnabled)
1225 ValidatePointer(m_Bias, descriptorName,
"bias");
1227 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1231 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1234 ValidatePerAxisQuantization(inputTensorInfo,
1237 optionalBiasTensorInfo,
1240 std::vector<DataType> supportedTypes =
1251 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1259 "for BFloat16 input.");
1264 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1270 const std::string descriptorName{
"DepthwiseConvolution2dQueueDescriptor"};
1272 ValidateNumInputs(workloadInfo, descriptorName, 1);
1273 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1278 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1279 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1281 ValidatePointer(m_Weight, descriptorName,
"weight");
1283 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1284 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
1286 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1289 boost::str(boost::format(
"%1%: dilationX (provided %2%) and dilationY (provided %3%) " 1290 "cannot be smaller than 1.") % descriptorName %
1291 m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1294 const unsigned int channelIndex = (m_Parameters.m_DataLayout ==
DataLayout::NCHW) ? 1 : 3;
1298 const unsigned int numWeightChannelMultiplier = weightTensorInfo.
GetShape()[0];
1299 const unsigned int numWeightInputChannels = weightTensorInfo.
GetShape()[1];
1300 const unsigned int numWeightOutputChannels = outputTensorInfo.
GetShape()[channelIndex];
1301 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1304 boost::str(boost::format(
"%1%: output_channels (provided %2%) should be " 1305 "equal to input_channels (provided %3%) multiplied by channel_multiplier " 1306 "(provided %4%).") % descriptorName % numWeightOutputChannels %
1307 numWeightInputChannels % numWeightChannelMultiplier));
1310 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1313 if (m_Parameters.m_BiasEnabled)
1315 ValidatePointer(m_Bias, descriptorName,
"bias");
1317 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1320 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1323 ValidatePerAxisQuantization(inputTensorInfo,
1326 optionalBiasTensorInfo,
1329 std::vector<DataType> supportedTypes =
1339 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1340 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1345 const std::string descriptorName{
"PermuteQueueDescriptor"};
1347 ValidateNumInputs(workloadInfo, descriptorName, 1);
1348 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1355 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.
GetSize(),
"input");
1356 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.
GetSize(),
"output");
1358 for (
unsigned int i = 0u; i < mapping.
GetSize(); ++i)
1360 if (inputTensorInfo.
GetShape()[i] != outputTensorInfo.
GetShape()[mapping[i]])
1363 " (=" + to_string(inputTensorInfo.
GetShape()[i]) +
") " +
1364 "must match dst dimension " + to_string(mapping[i]) +
1365 " (=" + to_string(outputTensorInfo.
GetShape()[mapping[i]]) +
")");
1369 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1374 const std::string descriptorName{
"Pooling2dQueueDescriptor"};
1376 ValidateNumInputs(workloadInfo, descriptorName, 1);
1377 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1382 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1383 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1385 std::vector<DataType> supportedTypes =
1395 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1396 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1401 const std::string descriptorName{
"ResizeBilinearQueueDescriptor"};
1403 ValidateNumInputs(workloadInfo, descriptorName, 1);
1404 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1409 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1410 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1412 std::vector<DataType> supportedTypes =
1422 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1423 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1426 const unsigned int inputBatchSize = inputTensorInfo.
GetShape()[0];
1427 const unsigned int outputBatchSize = outputTensorInfo.
GetShape()[0];
1428 if (inputBatchSize != outputBatchSize)
1431 boost::str(boost::format(
"%1%: Input batch size (%2%) " 1432 "does not match output batch size (%3%)") %
1433 descriptorName % inputBatchSize % outputBatchSize));
1439 if (inputChannelCount != outputChannelCount)
1442 boost::str(boost::format(
"%1%: Input channel count (%2%) " 1443 "does not match output channel count (%3%)") %
1444 descriptorName % inputChannelCount % outputChannelCount));
1450 const std::string descriptorName{
"ResizeQueueDescriptor"};
1452 ValidateNumInputs(workloadInfo, descriptorName, 1);
1453 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1458 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1459 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1461 std::vector<DataType> supportedTypes =
1471 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1472 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1475 const unsigned int inputBatchSize = inputTensorInfo.
GetShape()[0];
1476 const unsigned int outputBatchSize = outputTensorInfo.
GetShape()[0];
1477 if (inputBatchSize != outputBatchSize)
1480 boost::str(boost::format(
"%1%: Input batch size (%2%) " 1481 "does not match output batch size (%3%)") %
1482 descriptorName % inputBatchSize % outputBatchSize));
1488 if (inputChannelCount != outputChannelCount)
1491 boost::str(boost::format(
"%1%: Input channel count (%2%) " 1492 "does not match output channel count (%3%)") %
1493 descriptorName % inputChannelCount % outputChannelCount));
1499 const std::string descriptorName{
"FakeQuantizationQueueDescriptor"};
1501 ValidateNumInputs(workloadInfo, descriptorName, 1);
1502 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1507 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2,
"input");
1508 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2,
"output");
1510 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1512 if (m_Parameters.m_Min > m_Parameters.m_Max)
1520 const std::string descriptorName{
"InstanceNormalizationQueueDescriptor"};
1522 ValidateNumInputs(workloadInfo, descriptorName, 1);
1523 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1533 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1536 std::vector<DataType> supportedTypes =
1543 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1544 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1549 const std::string descriptorName{
"L2NormalizationQueueDescriptor"};
1551 ValidateNumInputs(workloadInfo, descriptorName, 1);
1552 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1562 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1565 std::vector<DataType> supportedTypes =
1575 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1576 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1581 const std::string descriptorName{
"LogSoftmaxQueueDescriptor"};
1583 ValidateNumInputs(workloadInfo, descriptorName, 1);
1584 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1589 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1591 std::vector<DataType> supportedTypes =
1598 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1599 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1604 const std::string descriptorName{
"ConstantQueueDescriptor"};
1606 ValidateNumInputs(workloadInfo, descriptorName, 0);
1607 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1615 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName,
"constant",
"output");
1618 std::vector<DataType> supportedTypes =
1630 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1635 const std::string descriptorName{
"ReshapeQueueDescriptor"};
1637 ValidateNumInputs(workloadInfo, descriptorName, 1);
1638 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1643 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1646 std::vector<DataType> supportedTypes =
1657 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1658 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1663 const std::string descriptorName{
"SpaceToBatchNdQueueDescriptor"};
1665 ValidateNumInputs(workloadInfo, descriptorName, 1);
1666 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1671 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1672 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1674 if (m_Parameters.m_BlockShape.size() != 2)
1679 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1682 "dimensions as Block Shape.");
1687 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1688 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1692 const unsigned int inputWidth = inputShape[dimensionIndices.
GetWidthIndex()] +
1693 widthPad.first + widthPad.second;
1694 const unsigned int inputHeight = inputShape[dimensionIndices.
GetHeightIndex()] +
1695 heightPad.first + heightPad.second;
1697 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1699 const unsigned int numOutputElements = outputTensorInfo.
GetNumElements();
1701 if (numOutputElements != numInputElements)
1704 to_string(numInputElements) +
" after padding but output tensor has " +
1705 to_string(numOutputElements) +
" elements.");
1708 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1711 "divisible by Block Shape in all spatial dimensions");
1714 std::vector<DataType> supportedTypes =
1724 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1725 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1730 const std::string descriptorName{
"SpaceToDepthQueueDescriptor"};
1732 ValidateNumInputs(workloadInfo, descriptorName, 1);
1733 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1738 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1739 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1741 std::vector<DataType> supportedTypes =
1751 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1752 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1754 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1756 if (m_Parameters.m_BlockSize == 0)
1762 const unsigned int wIndex = dimensionIndices.
GetWidthIndex();
1767 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1770 "by block size in all spatial dimensions");
1774 if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1777 "must be divisible by the square of block size." );
1783 const std::string descriptorName{
"FloorQueueDescriptor"};
1785 ValidateNumInputs(workloadInfo, descriptorName, 1);
1786 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1791 std::vector<DataType> supportedTypes =
1799 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1801 if (inputTensorInfo != outputTensorInfo)
1811 const std::string descriptorName{
"LstmQueueDescriptor"};
1823 std::vector<DataType> supportedTypes =
1832 ValidateDataTypes(workloadInfo.
m_InputTensorInfos[0], supportedTypes, descriptorName);
1841 "input_" + std::to_string(i));
1848 "LstmQueueDescriptor",
1850 "output_" + std::to_string(i));
1856 if (m_Parameters.m_ClippingThresCell < 0.0f)
1860 if (m_Parameters.m_ClippingThresProj < 0.0f)
1869 ValidatePointer(m_InputToOutputWeights,
"Null pointer check",
"InputToOutputWeights");
1870 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1871 ValidatePointer(m_RecurrentToOutputWeights,
"Null pointer check",
"RecurrentToOutputWeights");
1872 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1875 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[0], 2, (n_batch * n_input),
1876 descriptorName +
" input_0");
1878 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[1], 2, (n_batch * n_output),
1879 descriptorName +
" input_1");
1881 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[2], 2, (n_batch * n_cell),
1882 descriptorName +
" input_2");
1884 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1885 ValidateTensorNumDimNumElem(workloadInfo.
m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1886 descriptorName +
" output_0");
1889 descriptorName +
" output_1");
1892 descriptorName +
" output_2");
1895 descriptorName +
" output_3");
1899 if ( m_InputToInputWeights )
1901 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1902 (n_cell * n_input),
"InputLayerNormWeights");
1905 ValidatePointer(m_InputToForgetWeights,
"Null pointer check",
"InputToForgetWeights");
1906 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1907 (n_cell * n_input),
"InputToForgetWeights");
1909 ValidatePointer(m_InputToCellWeights,
"Null pointer check",
"InputToCellWeights");
1910 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1911 (n_cell * n_input),
"InputToCellWeights");
1913 if ( m_RecurrentToInputWeights )
1915 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1916 (n_cell * n_output),
"RecurrentToInputWeights");
1919 ValidatePointer(m_RecurrentToForgetWeights,
"Null pointer check",
"RecurrentToForgetWeights");
1920 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1921 (n_cell * n_output),
"RecurrentToForgetWeights");
1923 ValidatePointer(m_RecurrentToCellWeights,
"Null pointer check",
"RecurrentToCellWeights");
1924 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1925 (n_cell * n_output),
"RecurrentToCellWeights");
1929 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1930 !m_Parameters.m_CifgEnabled) ||
1931 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1932 m_Parameters.m_CifgEnabled));
1933 if (!cifg_weights_all_or_none)
1936 "RecurrentToInputWeights must either both be present (regular LSTM) " 1937 "or both not present (CIFG-LSTM). In addition CifgEnable must be set " 1941 if ( m_CellToInputWeights )
1943 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1944 n_cell,
"CellToInputWeights");
1946 if ( m_CellToForgetWeights )
1948 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1949 n_cell,
"CellToForgetWeights");
1951 if ( m_CellToOutputWeights )
1953 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1954 n_cell,
"CellToOutputWeights");
1958 bool peephole_weights_all_or_none =
1959 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1960 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1961 || ( !m_CellToInputWeights && !m_CellToForgetWeights
1962 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1963 if (!peephole_weights_all_or_none)
1969 if (m_Parameters.m_CifgEnabled)
1971 if (m_InputGateBias)
1978 if (!m_InputGateBias)
1981 "must be present.");
1983 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1984 n_cell,
"InputGateBias");
1987 ValidatePointer(m_ForgetGateBias,
"Null pointer check",
"ForgetGateBias");
1988 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell,
"ForgetGateBias");
1990 ValidatePointer(m_CellBias,
"Null pointer check",
"CellBias");
1991 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell,
"CellBias");
1993 ValidatePointer(m_OutputGateBias,
"Null pointer check",
"OutputGateBias");
1994 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell,
"OutputGateBias");
1996 if (m_ProjectionWeights)
1998 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1999 (n_cell * n_output),
"ProjectionWeights");
2001 if (m_ProjectionBias)
2003 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output,
"ProjectionBias");
2010 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
2011 !m_Parameters.m_ProjectionEnabled)
2012 || (m_ProjectionWeights && !m_ProjectionBias &&
2013 m_Parameters.m_ProjectionEnabled)
2014 || (m_ProjectionWeights && m_ProjectionBias &&
2015 m_Parameters.m_ProjectionEnabled));
2016 if (!projecton_tensors_consistent)
2025 if (m_InputLayerNormWeights)
2027 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"InputLayerNormWeights");
2029 if (m_ForgetLayerNormWeights)
2031 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell,
"ForgetLayerNormWeights");
2033 if (m_CellLayerNormWeights)
2035 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell,
"CellLayerNormWeights");
2037 if (m_OutputLayerNormWeights)
2039 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"OutputLayerNormWeights");
2042 if (m_Parameters.m_LayerNormEnabled)
2044 if (!m_Parameters.m_CifgEnabled)
2046 if (!m_InputLayerNormWeights)
2049 "disabled but InputLayerNormWeights are not present");
2051 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
2052 1, n_cell,
"InputLayerNormWeights");
2054 else if (m_InputLayerNormWeights)
2060 ValidatePointer(m_ForgetLayerNormWeights,
"Null pointer check layer normalisation enabled",
2061 "ForgetLayerNormWeights");
2062 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell,
"ForgetLayerNormWeights");
2064 ValidatePointer(m_OutputLayerNormWeights,
"Null pointer check layer normalisation enabled",
2065 "OutputLayerNormWeights");
2066 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"OutputLayerNormWeights");
2068 ValidatePointer(m_CellLayerNormWeights,
"Null pointer check layer normalisation enabled",
2069 "CellLayerNormWeights");
2070 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell,
"CellLayerNormWeights");
2072 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
2075 "normalisation weights are present.");
2081 const std::string descriptorName{
"ConvertBf16ToFp32QueueDescriptor"};
2083 ValidateNumInputs(workloadInfo, descriptorName, 1);
2084 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2099 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2104 const std::string descriptorName{
"ConvertFp32ToBf16QueueDescriptor"};
2106 ValidateNumInputs(workloadInfo, descriptorName, 1);
2107 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2122 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2127 const std::string descriptorName{
"ConvertFp32ToFp16QueueDescriptor"};
2129 ValidateNumInputs(workloadInfo, descriptorName, 1);
2130 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2145 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2150 const std::string descriptorName{
"ConvertFp16ToFp32QueueDescriptor"};
2152 ValidateNumInputs(workloadInfo, descriptorName, 1);
2153 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2168 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2173 const std::string descriptorName{
"DivisionQueueDescriptor"};
2175 ValidateNumInputs(workloadInfo, descriptorName, 2);
2176 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2182 std::vector<DataType> supportedTypes =
2193 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2194 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2195 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2197 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2207 const std::string descriptorName{
"SubtractionQueueDescriptor"};
2209 ValidateNumInputs(workloadInfo, descriptorName, 2);
2210 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2216 std::vector<DataType> supportedTypes =
2227 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2228 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2229 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2231 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2241 const std::string descriptorName{
"MaximumQueueDescriptor"};
2243 ValidateNumInputs(workloadInfo, descriptorName, 2);
2244 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2250 std::vector<DataType> supportedTypes =
2261 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2262 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2263 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2265 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2275 const std::string descriptorName{
"MeanQueueDescriptor"};
2277 ValidateNumInputs(workloadInfo, descriptorName, 1);
2278 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2283 std::vector<DataType> supportedTypes =
2295 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2296 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2298 if (m_Parameters.m_KeepDims)
2300 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.
GetNumDimensions(),
"output");
2302 else if (m_Parameters.m_Axis.empty())
2304 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1,
"output");
2308 unsigned int outputDim =
2310 ValidateTensorNumDimensions(outputTensorInfo,
2312 outputDim > 0 ? outputDim : 1,
2319 const std::string descriptorName{
"PadQueueDescriptor"};
2321 ValidateNumInputs(workloadInfo, descriptorName, 1);
2322 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2328 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.
GetNumDimensions(),
"output");
2333 "as there are dimensions in the input tensor that is " +
2335 " not " + std::to_string(m_Parameters.m_PadList.size()) +
" entries.");
2341 const std::string descriptorName{
"QuantizeQueueDescriptor"};
2343 ValidateNumInputs(workloadInfo, descriptorName, 1);
2344 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2349 std::vector<DataType> supportedTypes =
2360 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2370 const std::string descriptorName{
"BatchToSpaceNdQueueDescriptor"};
2372 ValidateNumInputs(workloadInfo, descriptorName, 1);
2373 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2378 std::vector<DataType> supportedTypes =
2388 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2389 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2394 const std::string descriptorName{
"StridedSliceQueueDescriptor"};
2396 ValidateNumInputs(workloadInfo, descriptorName, 1);
2397 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2402 std::vector<DataType> supportedTypes =
2412 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2413 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2415 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2424 if (m_Parameters.m_Begin.size() != rank)
2429 if (m_Parameters.m_End.size() != rank)
2434 if (m_Parameters.m_Stride.size() != rank)
2440 for (
auto& stride : m_Parameters.m_Stride)
2451 const std::string descriptorName{
"MinimumQueueDescriptor"};
2453 ValidateNumInputs(workloadInfo, descriptorName, 2);
2454 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2460 std::vector<DataType> supportedTypes =
2471 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2472 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2473 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2475 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2485 const std::string descriptorName{
"DebugQueueDescriptor"};
2487 ValidateNumInputs(workloadInfo, descriptorName, 1);
2488 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2493 const std::string descriptorName{
"EqualQueueDescriptor"};
2495 ValidateNumInputs(workloadInfo, descriptorName, 2);
2496 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2502 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2517 const std::string descriptorName{
"GreaterQueueDescriptor"};
2519 ValidateNumInputs(workloadInfo, descriptorName, 2);
2520 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2526 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2541 const std::string descriptorName{
"RsqrtQueueDescriptor"};
2543 ValidateNumInputs(workloadInfo, descriptorName, 1);
2544 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2549 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2551 std::vector<DataType> supportedTypes =
2561 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2562 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2567 const std::string descriptorName{
"GatherQueueDescriptor"};
2569 ValidateNumInputs(workloadInfo, descriptorName, 2);
2570 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2581 std::vector<DataType> supportedTypes =
2592 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2594 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2597 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim,
"output");
2602 const std::string& descriptorName{
"DetectionPostProcessQueueDescriptor"};
2604 ValidateNumInputs(workloadInfo, descriptorName, 2);
2612 if (m_Anchors ==
nullptr)
2626 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3,
"box encodings");
2627 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3,
"scores");
2628 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2,
"anchors");
2630 const std::vector<DataType> supportedInputTypes =
2640 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2641 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2642 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2644 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3,
"detection boxes");
2645 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2,
"detection scores");
2646 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2,
"detection classes");
2647 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1,
"num detections");
2650 ValidateTensorDataType(detectionBoxesInfo,
DataType::Float32, descriptorName,
"detection boxes");
2651 ValidateTensorDataType(detectionScoresInfo,
DataType::Float32, descriptorName,
"detection scores");
2652 ValidateTensorDataType(detectionClassesInfo,
DataType::Float32, descriptorName,
"detection classes");
2653 ValidateTensorDataType(numDetectionsInfo,
DataType::Float32, descriptorName,
"num detections");
2655 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2658 "must be positive and less than or equal to 1.");
2661 if (scoresInfo.
GetShape()[2] != m_Parameters.m_NumClasses + 1)
2664 "should be equal to number of classes + 1.");
2670 const std::string& descriptorName{
"DequantizeQueueDescriptor"};
2672 ValidateNumInputs(workloadInfo, descriptorName, 1);
2673 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2683 std::vector<DataType> supportedTypes =
2690 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2695 const std::string& descriptorName{
"MergeQueueDescriptor"};
2697 ValidateNumInputs(workloadInfo, descriptorName, 2);
2698 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2704 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
2705 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName,
"input_0",
"output");
2707 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
2708 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName,
"input_0",
"output");
2713 const std::string& descriptorName{
"SwitchQueueDescriptor"};
2715 ValidateNumInputs(workloadInfo, descriptorName, 2);
2716 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2724 std::vector<DataType> supportedTypes =
2733 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2734 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2736 ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2737 ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2739 ValidateTensorShapesMatch(inputTensorInfo0,
2745 ValidateTensorShapesMatch(inputTensorInfo0,
2759 const std::string& descriptorName{
"PreluQueueDescriptor"};
2761 ValidateNumInputs(workloadInfo, descriptorName, 2);
2762 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2768 std::vector<DataType> supportedTypes
2778 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2779 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2781 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2783 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName,
"input",
"alpha");
2784 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"ouptut");
2786 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2796 const std::string descriptorName{
"TransposeConvolution2dQueueDescriptor"};
2798 ValidateNumInputs(workloadInfo, descriptorName, 1);
2799 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2804 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
2805 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
2807 ValidatePointer(m_Weight, descriptorName,
"weight");
2809 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2810 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
2812 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2815 if (m_Parameters.m_BiasEnabled)
2817 ValidatePointer(m_Bias, descriptorName,
"bias");
2819 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2823 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2826 ValidatePerAxisQuantization(inputTensorInfo,
2829 optionalBiasTensorInfo,
2832 std::vector<DataType> supportedTypes =
2842 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2843 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2848 const std::string descriptorName{
"TransposeQueueDescriptor"};
2850 ValidateNumInputs(workloadInfo, descriptorName, 1);
2851 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2858 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.
GetSize(),
"input");
2859 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.
GetSize(),
"output");
2861 for (
unsigned int i = 0u; i < mapping.
GetSize(); ++i)
2863 if (inputTensorInfo.
GetShape()[mapping[i]] != outputTensorInfo.
GetShape()[i])
2866 " (=" + to_string(inputTensorInfo.
GetShape()[mapping[i]]) +
") " +
2867 "must match dst dimension " + to_string(i) +
2868 " (=" + to_string(outputTensorInfo.
GetShape()[i]) +
")");
2872 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2877 const std::string descriptorName{
"QLstmQueueDescriptor"};
2880 ValidateNumInputs(workloadInfo, descriptorName, 3);
2881 ValidateNumOutputs(workloadInfo, descriptorName, 3);
2893 std::vector<DataType> inputOutputSupportedTypes =
2898 std::vector<DataType> cellStateSupportedTypes =
2903 std::vector<DataType> weightsSupportedTypes =
2908 std::vector<DataType> layerNormPeepholeWeightsSupportedTypes =
2913 std::vector<DataType> biasSupportedTypes =
2919 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2920 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2921 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2923 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2924 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2925 ValidateDataTypes(outputInfo, inputOutputSupportedTypes, descriptorName);
2928 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
2929 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2930 "outputStateIn",
"outputStateOut");
2931 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
2934 const uint32_t numBatches = inputInfo.GetShape()[0];
2935 const uint32_t inputSize = inputInfo.GetShape()[1];
2936 const uint32_t outputSize = outputStateInInfo.GetShape()[1];
2937 const uint32_t numUnits = cellStateInInfo.GetShape()[1];
2940 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName +
" input");
2941 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateIn");
2942 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * numUnits), descriptorName +
" cellStateIn");
2944 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateOut");
2945 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * numUnits), descriptorName +
" cellStateOut");
2946 ValidateTensorNumDimNumElem(outputInfo, 2, (numBatches * outputSize), descriptorName +
" output");
2949 ValidatePointer(m_InputToForgetWeights, descriptorName,
"InputToForgetWeights");
2950 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2951 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (numUnits * inputSize),
" InputToForgetWeights");
2953 ValidatePointer(m_InputToCellWeights, descriptorName,
"InputToCellWeights");
2954 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2955 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (numUnits * inputSize),
" InputToCellWeights");
2957 ValidatePointer(m_InputToOutputWeights, descriptorName,
"InputToOutputWeights");
2958 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2959 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (numUnits * inputSize),
" InputToOutputWeights");
2961 ValidatePointer(m_RecurrentToForgetWeights, descriptorName,
"RecurrentToForgetWeights");
2962 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2963 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (numUnits * outputSize),
2964 " RecurrentToForgetWeights");
2966 ValidatePointer(m_RecurrentToCellWeights, descriptorName,
"RecurrentToCellWeights");
2967 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2968 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (numUnits * outputSize),
" RecurrentToCellWeights");
2970 ValidatePointer(m_RecurrentToOutputWeights, descriptorName,
"RecurrentToOutputWeights");
2971 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2972 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (numUnits * outputSize),
" RecurrentToCellWeights");
2975 ValidateDataTypes(inputToForgetWeightsInfo, weightsSupportedTypes, descriptorName);
2977 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToCellWeightsInfo, descriptorName,
2978 "inputToForgetWeights",
"inputToCellWeights");
2979 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2980 "inputToForgetWeights",
"inputToOutputWeights");
2982 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2983 "inputToForgetWeights",
"recurrentToForgeteights");
2984 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2985 "inputToForgetWeights",
"recurrentToCellWeights");
2986 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2987 "inputToForgetWeights",
"recurrentToOutputWeights");
2990 ValidatePointer(m_ForgetGateBias, descriptorName,
"ForgetGateBias");
2991 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2992 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, numUnits,
" ForgetGateBias");
2994 ValidatePointer(m_CellBias, descriptorName,
"CellBias");
2995 auto cellBiasInfo = m_CellBias->GetTensorInfo();
2996 ValidateTensorNumDimNumElem(cellBiasInfo, 1, numUnits,
" CellBias");
2998 ValidatePointer(m_OutputGateBias, descriptorName,
"OutputGateBias");
2999 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3000 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, numUnits,
" OutputGateBias");
3003 ValidateDataTypes(forgetGateBiasInfo, biasSupportedTypes, descriptorName);
3005 ValidateTensorDataTypesMatch(forgetGateBiasInfo, cellBiasInfo, descriptorName,
3006 "forgetGateBias",
"cellBias");
3007 ValidateTensorDataTypesMatch(forgetGateBiasInfo, outputGateBiasInfo, descriptorName,
3008 "forgetGateBias",
"outputGateBias");
3011 const bool allCifgParamsPresentOrNot = ((m_InputToInputWeights && m_RecurrentToInputWeights && m_InputGateBias &&
3012 !m_Parameters.m_CifgEnabled) ||
3013 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
3014 !m_InputGateBias && m_Parameters.m_CifgEnabled));
3016 if (!allCifgParamsPresentOrNot)
3019 ": InputToInputWeights, RecurrentToInputWeights and InputGateBias must either all be present " 3020 "(CIFG disabled) or not be present at all (CIFG enabled). m_Parameters.m_CifgEnabled should be " 3021 "set appropriately.");
3024 if (!m_Parameters.m_CifgEnabled)
3027 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3028 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (numUnits * inputSize),
" InputToInputWeights");
3030 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3031 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (numUnits * outputSize),
3032 " RecurrentToInputWeights");
3034 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3035 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, numUnits,
" InputGateBias");
3038 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, inputToInputWeightsInfo, descriptorName,
3039 "inputToForgetWeights",
"inputToInputWeights");
3040 ValidateTensorDataTypesMatch(inputToForgetWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3041 "inputToForgetWeights",
"recurrentToInputWeights");
3042 ValidateTensorDataTypesMatch(forgetGateBiasInfo, inputGateBiasInfo, descriptorName,
3043 "forgetGateBias",
"inputGateBias");
3047 bool allPeepholeWeightsPresentOrNot =
3048 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
3049 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
3050 || (!m_CellToInputWeights && !m_CellToForgetWeights
3051 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
3053 if (!allPeepholeWeightsPresentOrNot)
3056 ": CellToInputWeights, CellToForgetWeights and CellToOutputWeights should all be present (Peephole " 3057 "enabled) or not be present at all (Peephole disabled). CellToInputWeights should only be present " 3058 "when Peephole is enabled and CIFG is disabled. m_Parameters.m_PeepholeEnabled should be set " 3062 if (m_Parameters.m_PeepholeEnabled)
3064 auto cellToForgetWeightsInfo = m_CellToForgetWeights->GetTensorInfo();
3065 ValidateTensorNumDimNumElem(cellToForgetWeightsInfo, 1, numUnits,
" cellToForgetWeights");
3066 ValidateDataTypes(cellToForgetWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3068 auto cellToOutputWeightsInfo = m_CellToOutputWeights->GetTensorInfo();
3069 ValidateTensorNumDimNumElem(cellToOutputWeightsInfo, 1, numUnits,
" cellToOutputWeights");
3070 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToOutputWeightsInfo, descriptorName,
3071 "cellToForgetWeight",
"cellToOutputWeights");
3073 if (!m_Parameters.m_CifgEnabled)
3075 auto cellToInputWeightsInfo = m_CellToInputWeights->GetTensorInfo();
3076 ValidateTensorNumDimNumElem(cellToInputWeightsInfo, 1, numUnits,
" cellToInputWeights");
3077 ValidateTensorDataTypesMatch(cellToForgetWeightsInfo, cellToInputWeightsInfo, descriptorName,
3078 "cellToForgetWeights",
"cellToInputWeights");
3083 bool allLayerNormWeightsPresentOrNot =
3084 (((m_InputLayerNormWeights || m_Parameters.m_CifgEnabled) && m_ForgetLayerNormWeights
3085 && m_CellLayerNormWeights && m_OutputLayerNormWeights && m_Parameters.m_LayerNormEnabled)
3086 || (!m_InputLayerNormWeights && !m_ForgetLayerNormWeights && !m_CellLayerNormWeights
3087 && !m_OutputLayerNormWeights && !m_Parameters.m_LayerNormEnabled));
3089 if (!allLayerNormWeightsPresentOrNot)
3092 ": InputLayerNormWeights, ForgetLayerNormWeights, m_OutputLayerNormWeights " 3093 "and CellLayerNormWeights should all be present (Layer Norm enabled) or not " 3094 "be present at all (Layer Norm disabled). InputLayerNormWeights should " 3095 "only be present when Layer Norm is enabled and CIFG is disabled. " 3096 "m_Parameters.m_LayerNormEnabled should be set appropriately.");
3099 if (m_Parameters.m_LayerNormEnabled)
3101 auto forgetLayerNormWeightsInfo = m_ForgetLayerNormWeights->GetTensorInfo();
3102 ValidateTensorNumDimNumElem(forgetLayerNormWeightsInfo, 1, numUnits,
" forgetLayerNormWeights");
3103 ValidateDataTypes(forgetLayerNormWeightsInfo, layerNormPeepholeWeightsSupportedTypes, descriptorName);
3105 auto cellLayerNormWeightsInfo = m_CellLayerNormWeights->GetTensorInfo();
3106 ValidateTensorNumDimNumElem(cellLayerNormWeightsInfo, 1, numUnits,
" cellLayerNormWeights");
3107 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, cellLayerNormWeightsInfo, descriptorName,
3108 "forgetLayerNormWeights",
"cellLayerNormWeights");
3110 auto outputLayerNormWeightsInfo = m_OutputLayerNormWeights->GetTensorInfo();
3111 ValidateTensorNumDimNumElem(outputLayerNormWeightsInfo, 1, numUnits,
" outputLayerNormWeights");
3112 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, outputLayerNormWeightsInfo, descriptorName,
3113 "forgetLayerNormWeights",
"outputLayerNormWeights");
3115 if (!m_Parameters.m_CifgEnabled)
3117 auto inputLayerNormWeightsInfo = m_InputLayerNormWeights->GetTensorInfo();
3118 ValidateTensorNumDimNumElem(inputLayerNormWeightsInfo, 1, numUnits,
" inputLayerNormWeights");
3119 ValidateTensorDataTypesMatch(forgetLayerNormWeightsInfo, inputLayerNormWeightsInfo, descriptorName,
3120 "forgetLayerNormWeights",
"inputLayerNormWeights");
3125 bool correctProjectionTensorsPresent =
3126 ((!m_ProjectionWeights && !m_ProjectionBias && !m_Parameters.m_ProjectionEnabled) ||
3127 (m_ProjectionWeights && !m_ProjectionBias && m_Parameters.m_ProjectionEnabled) ||
3128 (m_ProjectionWeights && m_ProjectionBias && m_Parameters.m_ProjectionEnabled));
3130 if (!correctProjectionTensorsPresent)
3133 ": If projection is enabled, ProjectionWeights should be present and " 3134 "ProjectionBias is optional. If projection is disabled, neither " 3135 "ProjectionWeights nor ProjectionBias should be present.");
3138 if (m_Parameters.m_ProjectionEnabled)
3140 auto projectionWeightsInfo = m_ProjectionWeights->GetTensorInfo();
3141 ValidateTensorNumDimNumElem(projectionWeightsInfo, 2, (numUnits * outputSize),
"ProjectionWeights");
3142 ValidateDataTypes(projectionWeightsInfo, weightsSupportedTypes, descriptorName);
3144 if (m_ProjectionBias)
3146 auto projectionBiasInfo = m_ProjectionBias->GetTensorInfo();
3147 ValidateTensorNumDimNumElem(projectionBiasInfo, 1, outputSize,
"ProjectionBias");
3148 ValidateDataTypes(projectionBiasInfo, biasSupportedTypes, descriptorName);
3152 else if ((outputInfo.GetQuantizationScale() != m_Parameters.m_HiddenStateScale) &&
3153 outputInfo.GetQuantizationOffset() != m_Parameters.m_HiddenStateZeroPoint) {
3155 ": If projection is disabled, output quantization info (scale, offset) " 3156 "should match HiddenStateScale and HiddenStateZeroPoint.");
// QuantizedLstmQueueDescriptor::Validate (fragment — the signature, throw lines and the
// std::vector initializer contents were lost in extraction; bytes below are otherwise
// preserved). Validates the 3 inputs / 2 outputs, all 8 weight tensors and 4 gate-bias
// tensors of a quantized LSTM workload: supported data types, matching types and
// quantization spaces, and expected shapes derived from the input/cell-state tensors.
//
// Fixes applied in this revision (error-message string literals only, no logic change):
//  - the RecurrentToOutputWeights shape check previously reported " RecurrentToCellWeights"
//    (copy-paste error);
//  - the recurrentToForgetWeights type-match check previously reported the misspelled
//    name "recurrentToForgeteights".
3163 const std::string descriptorName{
"QuantizedLstmQueueDescriptor"};
3166 ValidateNumInputs(workloadInfo, descriptorName, 3);
3167 ValidateNumOutputs(workloadInfo, descriptorName, 2);
// Supported-type whitelists (initializer lists not visible in this extraction).
3177 std::vector<DataType> inputOutputSupportedTypes =
3182 std::vector<DataType> cellStateSupportedTypes =
3187 std::vector<DataType> weightsSupportedTypes =
3192 std::vector<DataType> biasSupportedTypes =
3198 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
3199 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
3200 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
3202 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
3203 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
// Inputs/outputs must agree on data type and (for the state tensors) quantization space.
3206 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
3207 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
3208 "outputStateIn",
"outputStateOut");
3209 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
3212 ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
3213 ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName,
"input",
"outputStateOut");
3214 ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
// Shapes: input is [numBatches, inputSize]; cell/output state are [numBatches, outputSize].
3217 const uint32_t numBatches = inputInfo.GetShape()[0];
3218 const uint32_t inputSize = inputInfo.GetShape()[1];
3219 const uint32_t outputSize = cellStateInInfo.GetShape()[1];
3222 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName +
" input");
3223 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName +
" cellStateIn");
3224 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateIn");
3225 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" cellStateOut");
3226 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateOut");
// Input-to-gate weights: each mandatory, [outputSize, inputSize].
3229 ValidatePointer(m_InputToInputWeights, descriptorName,
"InputToInputWeights");
3230 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
3231 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize),
" InputToInputWeights");
3233 ValidatePointer(m_InputToForgetWeights, descriptorName,
"InputToForgetWeights");
3234 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
3235 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize),
" InputToForgetWeights");
3237 ValidatePointer(m_InputToCellWeights, descriptorName,
"InputToCellWeights");
3238 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
3239 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize),
" InputToCellWeights");
3241 ValidatePointer(m_InputToOutputWeights, descriptorName,
"InputToOutputWeights");
3242 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
3243 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize),
" InputToOutputWeights");
// Recurrent-to-gate weights: each mandatory, [outputSize, outputSize].
3245 ValidatePointer(m_RecurrentToInputWeights, descriptorName,
"RecurrentToInputWeights");
3246 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
3247 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToInputWeights");
3249 ValidatePointer(m_RecurrentToForgetWeights, descriptorName,
"RecurrentToForgetWeights");
3250 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
3251 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
3252 " RecurrentToForgetWeights");
3254 ValidatePointer(m_RecurrentToCellWeights, descriptorName,
"RecurrentToCellWeights");
3255 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
3256 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToCellWeights");
3258 ValidatePointer(m_RecurrentToOutputWeights, descriptorName,
"RecurrentToOutputWeights");
3259 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
3260 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToOutputWeights");
// All weight tensors must share the data type (and quantization space) of
// inputToInputWeights, which is validated against the whitelist once here.
3263 ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
3265 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
3266 "inputToInputWeights",
"inputToForgetWeights");
3267 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
3268 "inputToInputWeights",
"inputToCellWeights");
3269 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
3270 "inputToInputWeights",
"inputToOutputWeights");
3272 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
3273 "inputToInputWeights",
"recurrentToInputWeights");
3274 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
3275 "inputToInputWeights",
"recurrentToForgetWeights");
3276 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
3277 "inputToInputWeights",
"recurrentToCellWeights");
3278 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
3279 "inputToInputWeights",
"recurrentToOutputWeights");
3282 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
3283 descriptorName,
"inputToInputWeights",
"inputToForgetWeights");
3284 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
3285 descriptorName,
"inputToInputWeights",
"inputToCellWeights");
3286 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
3287 descriptorName,
"inputToInputWeights",
"inputToOutputWeights");
3289 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
3290 descriptorName,
"inputToInputWeights",
"recurrentToInputWeights");
3291 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
3292 descriptorName,
"inputToInputWeights",
"recurrentToForgetWeights");
3293 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
3294 descriptorName,
"inputToInputWeights",
"recurrentToCellWeights");
3295 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
3296 descriptorName,
"inputToInputWeights",
"recurrentToOutputWeights");
// Gate biases: each mandatory, 1-D with outputSize elements.
3299 ValidatePointer(m_InputGateBias, descriptorName,
"InputGateBias");
3300 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
3301 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize,
" InputGateBias");
3303 ValidatePointer(m_ForgetGateBias, descriptorName,
"ForgetGateBias");
3304 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
3305 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize,
" ForgetGateBias");
3307 ValidatePointer(m_CellBias, descriptorName,
"CellBias");
3308 auto cellBiasInfo = m_CellBias->GetTensorInfo();
3309 ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize,
" CellBias");
3311 ValidatePointer(m_OutputGateBias, descriptorName,
"OutputGateBias");
3312 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
3313 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize,
" OutputGateBias");
// Biases must all share a data type and satisfy the bias-quantization rule relative to
// the input and (shared-quantization-space) weight tensors.
3316 ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
3318 ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
3319 "inputGateBias",
"forgetGateBias");
3320 ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
3321 "inputGateBias",
"cellBias");
3322 ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
3323 "inputGateBias",
"outputGateBias");
3326 ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3327 ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3328 ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3329 ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
3334 const std::string descriptorName{
"AbsQueueDescriptor"};
3336 ValidateNumInputs(workloadInfo, descriptorName, 1);
3337 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3342 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3344 std::vector<DataType> supportedTypes =
3355 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3356 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3361 const std::string descriptorName{
"SliceQueueDescriptor"};
3363 ValidateNumInputs(workloadInfo, descriptorName, 1);
3364 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3369 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3377 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank,
"output");
3380 if (m_Parameters.m_Begin.size() != rank)
3383 ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
3385 if (m_Parameters.m_Size.size() != rank)
3388 ": Length of size descriptor must equal rank " + std::to_string(rank));
3393 for (
unsigned int i = 0u; i < rank; ++i)
3395 if (m_Parameters.m_Size[i] != outputShape[i])
3404 for(
unsigned int i = 0u; i < rank; ++i)
3406 if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
3409 std::to_string(i) +
" exceeds input size.");
3416 const std::string descriptorName{
"DepthToSpaceQueueDescriptor"};
3418 ValidateNumInputs(workloadInfo, descriptorName, 1);
3419 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3424 ValidateTensorNumDimensions(inputInfo, descriptorName, 4,
"input");
3425 ValidateTensorNumDimensions(outputInfo, descriptorName, 4,
"output");
3427 std::vector<DataType> supportedTypes =
3437 ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
3438 ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
3440 ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName,
"input",
"output");
3442 if (m_Parameters.m_BlockSize == 0)
3448 const unsigned int wIndex = dimensionIndices.
GetWidthIndex();
3453 if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
3456 "must be divisible by block size.");
3460 if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
3463 "must be divisible by the square of block size." );
3469 const std::string descriptorName{
"ComparisonQueueDescriptor"};
3471 ValidateNumInputs(workloadInfo, descriptorName, 2);
3472 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3478 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3493 const std::string descriptorName{
"ElementwiseUnaryQueueDescriptor"};
3495 ValidateNumInputs(workloadInfo, descriptorName, 1);
3496 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3501 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3503 std::vector<DataType> supportedTypes =
3514 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3515 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3520 const std::string descriptorName{
"RankQueueDescriptor"};
3522 ValidateNumInputs(workloadInfo, descriptorName, 1);
3523 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3528 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1,
"output");
3529 ValidateTensorNumElements(outputTensorInfo, descriptorName, 1,
"output");
3531 std::vector<DataType> supportedTypes =
3543 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3544 ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
std::vector< unsigned int > m_Origin
const TensorShape & GetShape() const
constexpr bool IsQuantizedType()
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
Optional< unsigned int > GetQuantizationDim() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Copyright (c) 2020 ARM Limited.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< float > GetQuantizationScales() const
bool HasMultipleQuantizationScales() const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
constexpr bool IsQuantized8BitType(DataType dataType)
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_NO_DEPRECATE_WARN_END
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_ASSERT_MSG(COND, MSG)
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
DataType GetDataType() const
bool has_value() const noexcept
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Outputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
Contains information about inputs and outputs to a layer.
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
unsigned int GetChannelsIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin