16 #include <boost/format.hpp> 17 #include <boost/numeric/conversion/cast.hpp> 27 switch (inputDataType)
29 case DataType::Float16:
30 return DataType::Float16;
31 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QAsymmS8:
34 return DataType::Signed32;
35 case DataType::QAsymmU8:
36 return DataType::Signed32;
37 case DataType::QSymmS8:
38 return DataType::Signed32;
39 case DataType::QSymmS16:
40 return DataType::Signed32;
42 BOOST_ASSERT_MSG(
false,
"Invalid input data type");
43 return DataType::Float32;
// Generic value-to-string helper: unlike std::to_string, this works for any
// streamable type and uses default ostream formatting.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}
61 void ValidatePointer(
const void* ptr, std::string
const& descName, std::string
const& paramName)
66 paramName +
" parameter must be set.");
71 void ValidateTensorShapesMatch(
const TensorInfo& first,
73 std::string
const& descName,
74 std::string
const& firstName,
75 std::string
const& secondName)
80 + firstName +
" & " + secondName +
" must have identical shapes");
85 void ValidateNumInputs(
const WorkloadInfo& workloadInfo, std::string
const& descName,
const unsigned int expectedSize)
90 ": Requires exactly " + to_string(expectedSize) +
"input(s). " +
96 void ValidateNumOutputs(
const WorkloadInfo& workloadInfo, std::string
const& descName,
const unsigned int expectedSize)
101 ": Requires exactly " + to_string(expectedSize) +
" output(s). " +
107 void ValidateTensorNumDimensions(
const TensorInfo& tensor,
108 std::string
const& descName,
109 unsigned int numDimensions,
110 std::string
const& tensorName)
116 tensorName +
" tensor.");
121 void ValidateTensorNumElements(
const TensorInfo& tensor,
122 std::string
const& descName,
123 unsigned int numElements,
124 std::string
const& tensorName)
130 tensorName +
" tensor.");
135 void ValidateTensorNumDimNumElem(
const TensorInfo& tensorInfo,
136 unsigned int numDimension,
137 unsigned int numElements,
138 std::string
const& tensorName)
140 const std::string functionName{
"ValidateTensorNumDimNumElem"};
141 ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
142 ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
147 const std::string& descName, std::string
const& tensorName)
156 void ValidPerAxisQuantizedDataType(
const TensorInfo& tensor,
const std::string& descName,
const std::string& tensorName)
160 tensor.
GetDataType() != DataType::QuantizedSymm8PerAxis)
163 ": Expected data type which supports per-axis quantization scheme but got " +
170 void ValidateTensorQuantizationSpace(
const TensorInfo& first,
172 const std::string& descName,
173 std::string
const& firstName,
174 std::string
const& secondName)
186 if (firstDataType != secondDataType)
189 " must be of the same quantized type, " +
197 " must have the same quantization space, " +
206 void ValidateBiasTensorQuantization(
const TensorInfo& biasTensor,
209 const std::string& descName)
212 auto VerifyBiasQuantizationScale = [&descName](
float biasScale,
float expectedScale) ->
void 214 constexpr
float tolerance = 0.000001f;
215 if (std::abs(biasScale - expectedScale) > tolerance)
218 std::stringstream msg;
219 msg << std::setprecision(10) << descName <<
": Expected " << expectedScale <<
220 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
238 if (weightScales.size() != biasScales.size())
240 std::stringstream msg;
241 msg << descName <<
": Expected matchhing number of per-axis quantization scales, but got different " 242 <<
"values: weights=" << weightScales.size() <<
", biases=" << biasScales.size();
246 for (
size_t i = 0ul; i < biasScales.size(); ++i)
249 VerifyBiasQuantizationScale(biasScales[i], expectedScale);
261 void ValidateTensors(
const std::vector<ITensorHandle*>& vec,
262 unsigned int numExpected,
263 const std::string& descName,
264 const std::string& varName)
266 if (vec.empty() && numExpected > 0)
271 for (
unsigned int i = 0; i < numExpected; ++i)
281 void ValidateBroadcastTensorShapesMatch(
const TensorInfo& first,
284 std::string
const& descName,
285 std::string
const& firstName,
286 std::string
const& secondName)
293 + firstName +
" & " + secondName
294 +
" must have the same number of dimensions in order to be broadcasted");
297 std::vector<uint32_t> outputDims(numDims, 0u);
298 for (uint32_t i = 0; i < numDims; i++)
301 const bool dimsNotOne = (first.
GetShape()[i] != 1) && (second.
GetShape()[i] != 1);
302 if (dimsNotEqual && dimsNotOne)
308 TensorShape broadcastShape =
TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
309 if (broadcastShape != output.
GetShape())
312 + firstName +
" & " + secondName
313 +
" does not match the output shape");
319 const std::vector<armnn::DataType>& supportedTypes,
320 std::string
const& descName)
322 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.
GetDataType());
323 if (iterator == supportedTypes.end())
330 void ValidateTensorDataTypesMatch(
const TensorInfo& first,
332 std::string
const& descName,
333 std::string
const& firstName,
334 std::string
const& secondName)
339 " must have identical data types.");
344 void ValidateTensorNumElementsMatch(
const TensorInfo& first,
346 std::string
const& descName,
347 std::string
const& firstName,
348 std::string
const& secondName)
353 " must have the same number of elements.");
357 void ValidateWeightDataType(
const TensorInfo& inputInfo,
359 const std::string& descName)
365 const std::vector<DataType> validTypes =
370 DataType::QuantizedSymm8PerAxis
374 ValidateDataTypes(weightInfo, validTypes, descName);
378 ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName,
"input",
"weight");
382 void ValidatePerAxisQuantizationDimension(
const TensorInfo& tensorInfo,
383 const std::string& descName,
384 const std::string& tensorName)
390 boost::format(
"%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
391 % descName % tensorName));
394 if (quantizationDim.
value() != 0)
397 boost::format(
"%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, " 398 "but got: %3%") % descName % tensorName % quantizationDim.
value()));
402 void ValidatePerAxisQuantizationOffset(
const TensorInfo& tensorInfo,
403 const std::string& descName,
404 const std::string& tensorName)
407 if (quantizationOffset != 0)
410 boost::format(
"%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, " 411 "but got: %3%") % descName % tensorName % quantizationOffset));
415 void ValidatePerAxisQuantization(
const TensorInfo& inputInfo,
419 const std::string& descName)
426 const bool canHavePerAxisQuantization = (
IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;
428 if (!canHavePerAxisQuantization)
431 boost::format(
"%1%: Per-axis quantization parameters set on tensor %2%, " 432 "but data type does not support per-axis quantization.") % descName %
"weight"));
436 ValidPerAxisQuantizedDataType(weightInfo, descName,
"weight");
437 ValidatePerAxisQuantizationDimension(weightInfo, descName,
"weight");
438 ValidatePerAxisQuantizationOffset(weightInfo, descName,
"weight");
446 boost::format(
"%1%: Per-axis quantization parameters not set on bias tensor, despite being set on " 447 "weight tensor.") % descName));
450 ValidateTensorDataType(biasInfo, DataType::Signed32, descName,
"bias");
451 ValidatePerAxisQuantizationDimension(biasInfo, descName,
"bias");
452 ValidatePerAxisQuantizationOffset(biasInfo, descName,
"bias");
460 unsigned int numExpectedIn,
unsigned int numExpectedOut)
const 462 ValidateTensors(
m_Inputs, numExpectedIn, descName,
"input");
463 ValidateTensors(
m_Outputs, numExpectedOut, descName,
"output");
469 const std::string descriptorName{
"MemCopyQueueDescriptor"};
471 ValidateNumInputs(workloadInfo, descriptorName, 1);
472 ValidateNumOutputs(workloadInfo, descriptorName , 1);
477 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
478 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
483 boost::format(
"%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
487 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
492 descriptorName % i));
498 descriptorName % i));
506 ValidateNumInputs(workloadInfo,
"MemImportQueueDescriptor", 1);
507 ValidateNumOutputs(workloadInfo,
"MemImportQueueDescriptor" , 1);
512 boost::format(
"Number of input infos (%1%) is not 1.")
520 boost::format(
"Number of input infos (%1%) does not match the number of output infos (%2%)")
530 boost::format(
"Number of elements for tensor input and output %1% does not match")
538 boost::format(
"Number of inputs (%1%) is not 1.")
545 boost::format(
"Number of inputs (%1%) does not match the number of outputs (%2%)")
549 for (
unsigned int i = 0; i <
m_Inputs.size(); ++i)
566 ValidateNumInputs(workloadInfo,
"MemSyncQueueDescriptor", 1);
567 ValidateNumOutputs(workloadInfo,
"MemSyncQueueDescriptor" , 1);
572 boost::format(
"Number of inputs (%1%) is not 1.")
579 boost::format(
"Number of outputs (%1%) is not 0.")
592 const std::string descriptorName{
"ActivationQueueDescriptor"};
594 ValidateNumInputs(workloadInfo, descriptorName, 1);
595 ValidateNumOutputs(workloadInfo, descriptorName, 1);
600 std::vector<DataType> supportedTypes =
609 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
610 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
611 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
616 const std::string descriptorName{
"ArgMinMaxQueueDescriptor"};
618 ValidateNumInputs(workloadInfo, descriptorName, 1);
619 ValidateNumOutputs(workloadInfo, descriptorName, 1);
629 std::vector<DataType> supportedInputTypes =
638 ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
640 auto inputShape = inputTensorInfo.
GetShape();
641 auto outputShape = outputTensorInfo.
GetShape();
646 const std::string outputShapeError{
": Output tensor shape does not match shape inferred from input tensor."};
649 if (inputShape.GetNumDimensions() == 1)
651 if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
658 for (
unsigned int i = 0; i < unsignedAxis; ++i)
660 if (outputShape[i] != inputShape[i])
666 for (
auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
668 if (outputShape[i - 1] != inputShape[i])
678 const std::string descriptorName{
"SoftmaxQueueDescriptor"};
680 ValidateNumInputs(workloadInfo, descriptorName, 1);
681 ValidateNumOutputs(workloadInfo, descriptorName, 1);
686 std::vector<DataType> supportedTypes =
695 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
696 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
697 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
702 const std::string descriptorName{
"SplitterQueueDescriptor"};
704 ValidateNumInputs(workloadInfo, descriptorName, 1);
707 std::vector<DataType> supportedTypes =
721 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
723 const std::string outputName =
"output_" + std::to_string(i);
724 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input", outputName);
735 descriptorName +
": Number of split windows " 736 "has to match number of workloadInfo.m_OutputTensorInfos. " 737 "Number of windows: " +
738 to_string(m_ViewOrigins.size()) +
739 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.
m_OutputTensorInfos.size()));
744 for(
unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
751 "have the same dimensionality as the input tensor. " 752 "Window origin (index: " +
753 to_string(w) +
") has " + to_string(e.
m_Origin.size()) +
754 " dimensions, the input " 756 to_string(inputDims) +
" dimensions.");
758 for (
unsigned int i = 0; i < e.
m_Origin.size(); ++i)
764 "be smaller or equal than the size of the input in that coord.");
772 const std::string descriptorName{
"ConcatQueueDescriptor"};
774 ValidateNumOutputs(workloadInfo, descriptorName, 1);
794 if(m_Parameters.GetConcatAxis() > workloadInfo.
m_InputTensorInfos[0].GetShape().GetNumDimensions())
799 if (workloadInfo.
m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
807 descriptorName +
": Number of split windows " 808 "has to match number of workloadInfo.m_InputTensorInfos. " 809 "Number of windows: " +
810 to_string(m_ViewOrigins.size()) +
811 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.
m_InputTensorInfos.size()));
816 for(
unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
820 if (e.
m_Origin.size() != outputDims)
823 "have the same dimensionality as the output tensor. " 824 "Window origin (index: " +
825 to_string(w) +
") has " + to_string(e.
m_Origin.size()) +
826 " dimensions, the output " 828 to_string(outputDims) +
" dimensions.");
831 for (
unsigned int i = 0; i < e.
m_Origin.size(); ++i)
837 "be smaller or equal than the size of the output in that coord.");
843 std::vector<DataType> supportedTypes =
857 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
859 const std::string inputName =
"input_" + std::to_string(i);
860 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName,
"output");
866 const std::string descriptorName{
"StackQueueDescriptor"};
868 ValidateNumOutputs(workloadInfo, descriptorName, 1);
876 const TensorShape& inputShape = m_Parameters.m_InputShape;
895 "than the number of input dimensions.");
900 for (
unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
902 if (outputShape[i] != inputShape[i])
905 "match shape inferred from input tensor.");
909 if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
912 "match shape inferred from input tensor.");
915 for (
unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.
GetNumDimensions() + 1; ++i)
917 if (outputShape[i] != inputShape[i-1])
920 "match shape inferred from input tensor.");
930 std::vector<DataType> supportedTypes =
940 ValidateDataTypes(workloadInfo.
m_InputTensorInfos[0], supportedTypes, descriptorName);
948 "input_" + std::to_string(i));
960 const std::string descriptorName{
"FullyConnectedQueueDescriptor"};
962 ValidateNumInputs(workloadInfo, descriptorName, 1);
963 ValidateNumOutputs(workloadInfo, descriptorName, 1);
968 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2,
"output");
975 ValidatePointer(m_Weight, descriptorName,
"weight");
977 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
978 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2,
"weight");
980 if (m_Parameters.m_BiasEnabled)
982 ValidatePointer(m_Bias, descriptorName,
"bias");
985 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
986 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
989 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1,
"bias");
993 std::vector<DataType> supportedTypes =
1001 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1002 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1007 const std::string descriptorName{
"NormalizationQueueDescriptor"};
1009 ValidateNumInputs(workloadInfo, descriptorName, 1);
1010 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1016 std::vector<DataType> supportedTypes =
1024 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1026 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1028 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1033 const std::string descriptorName{
"AdditionQueueDescriptor"};
1035 ValidateNumInputs(workloadInfo, descriptorName, 2);
1036 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1042 std::vector<DataType> supportedTypes =
1051 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1052 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1053 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1055 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
1056 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName,
"input_1",
"output");
1058 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1068 const std::string descriptorName{
"MultiplicationQueueDescriptor"};
1070 ValidateNumInputs(workloadInfo, descriptorName, 2);
1071 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1077 std::vector<DataType> supportedTypes =
1086 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1087 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1088 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1090 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
1091 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName,
"input_1",
"output");
1093 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1103 const std::string descriptorName{
"BatchNormalizationQueueDescriptor"};
1105 ValidateNumInputs(workloadInfo, descriptorName, 1);
1106 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1111 std::vector<DataType> supportedTypes =
1119 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1120 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1122 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1123 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1124 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1126 ValidatePointer(m_Mean, descriptorName,
"mean");
1127 ValidatePointer(m_Variance, descriptorName,
"variance");
1128 ValidatePointer(m_Beta, descriptorName,
"beta");
1129 ValidatePointer(m_Gamma, descriptorName,
"gamma");
1131 const TensorInfo& mean = m_Mean->GetTensorInfo();
1132 const TensorInfo& variance = m_Variance->GetTensorInfo();
1133 const TensorInfo& beta = m_Beta->GetTensorInfo();
1134 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
1136 ValidateTensorNumDimensions(mean, descriptorName, 1,
"mean");
1137 ValidateTensorNumDimensions(variance, descriptorName, 1,
"variance");
1138 ValidateTensorNumDimensions(beta, descriptorName, 1,
"beta");
1139 ValidateTensorNumDimensions(gamma, descriptorName, 1,
"gamma");
1141 ValidateTensorShapesMatch(mean, variance, descriptorName,
"mean",
"variance");
1142 ValidateTensorShapesMatch(mean, beta, descriptorName,
"mean",
"beta");
1143 ValidateTensorShapesMatch(mean, gamma, descriptorName,
"mean",
"gamma");
1148 const std::string descriptorName{
"Convolution2dQueueDescriptor"};
1150 ValidateNumInputs(workloadInfo, descriptorName, 1);
1151 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1156 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1157 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1159 ValidatePointer(m_Weight, descriptorName,
"weight");
1161 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1162 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
1164 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1167 if (m_Parameters.m_BiasEnabled)
1169 ValidatePointer(m_Bias, descriptorName,
"bias");
1171 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1175 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1178 ValidatePerAxisQuantization(inputTensorInfo,
1181 optionalBiasTensorInfo,
1184 std::vector<DataType> supportedTypes =
1194 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1195 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1200 const std::string descriptorName{
"DepthwiseConvolution2dQueueDescriptor"};
1202 ValidateNumInputs(workloadInfo, descriptorName, 1);
1203 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1208 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1209 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1211 ValidatePointer(m_Weight, descriptorName,
"weight");
1213 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1214 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
1216 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1219 boost::str(boost::format(
"%1%: dilationX (provided %2%) and dilationY (provided %3%) " 1220 "cannot be smaller than 1.") % descriptorName %
1221 m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1224 const unsigned int channelIndex = (m_Parameters.m_DataLayout ==
DataLayout::NCHW) ? 1 : 3;
1228 const unsigned int numWeightChannelMultiplier = weightTensorInfo.
GetShape()[0];
1229 const unsigned int numWeightInputChannels = weightTensorInfo.
GetShape()[1];
1230 const unsigned int numWeightOutputChannels = outputTensorInfo.
GetShape()[channelIndex];
1231 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1234 boost::str(boost::format(
"%1%: output_channels (provided %2%) should be " 1235 "equal to input_channels (provided %3%) multiplied by channel_multiplier " 1236 "(provided %4%).") % descriptorName % numWeightOutputChannels %
1237 numWeightInputChannels % numWeightChannelMultiplier));
1240 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
1243 if (m_Parameters.m_BiasEnabled)
1245 ValidatePointer(m_Bias, descriptorName,
"bias");
1247 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
1250 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1253 ValidatePerAxisQuantization(inputTensorInfo,
1256 optionalBiasTensorInfo,
1259 std::vector<DataType> supportedTypes =
1268 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1269 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1274 const std::string descriptorName{
"PermuteQueueDescriptor"};
1276 ValidateNumInputs(workloadInfo, descriptorName, 1);
1277 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1284 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.
GetSize(),
"input");
1285 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.
GetSize(),
"output");
1287 for (
unsigned int i = 0u; i < mapping.
GetSize(); ++i)
1289 if (inputTensorInfo.
GetShape()[i] != outputTensorInfo.
GetShape()[mapping[i]])
1292 " (=" + to_string(inputTensorInfo.
GetShape()[i]) +
") " +
1293 "must match dst dimension " + to_string(mapping[i]) +
1294 " (=" + to_string(outputTensorInfo.
GetShape()[mapping[i]]) +
")");
1298 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1303 const std::string descriptorName{
"Pooling2dQueueDescriptor"};
1305 ValidateNumInputs(workloadInfo, descriptorName, 1);
1306 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1311 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1312 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1314 std::vector<DataType> supportedTypes =
1323 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1324 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1329 const std::string descriptorName{
"ResizeBilinearQueueDescriptor"};
1331 ValidateNumInputs(workloadInfo, descriptorName, 1);
1332 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1337 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1338 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1340 std::vector<DataType> supportedTypes =
1348 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1349 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1352 const unsigned int inputBatchSize = inputTensorInfo.
GetShape()[0];
1353 const unsigned int outputBatchSize = outputTensorInfo.
GetShape()[0];
1354 if (inputBatchSize != outputBatchSize)
1357 boost::str(boost::format(
"%1%: Input batch size (%2%) " 1358 "does not match output batch size (%3%)") %
1359 descriptorName % inputBatchSize % outputBatchSize));
1365 if (inputChannelCount != outputChannelCount)
1368 boost::str(boost::format(
"%1%: Input channel count (%2%) " 1369 "does not match output channel count (%3%)") %
1370 descriptorName % inputChannelCount % outputChannelCount));
1376 const std::string descriptorName{
"ResizeQueueDescriptor"};
1378 ValidateNumInputs(workloadInfo, descriptorName, 1);
1379 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1384 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1385 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1387 std::vector<DataType> supportedTypes =
1396 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1397 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1400 const unsigned int inputBatchSize = inputTensorInfo.
GetShape()[0];
1401 const unsigned int outputBatchSize = outputTensorInfo.
GetShape()[0];
1402 if (inputBatchSize != outputBatchSize)
1405 boost::str(boost::format(
"%1%: Input batch size (%2%) " 1406 "does not match output batch size (%3%)") %
1407 descriptorName % inputBatchSize % outputBatchSize));
1413 if (inputChannelCount != outputChannelCount)
1416 boost::str(boost::format(
"%1%: Input channel count (%2%) " 1417 "does not match output channel count (%3%)") %
1418 descriptorName % inputChannelCount % outputChannelCount));
1424 const std::string descriptorName{
"FakeQuantizationQueueDescriptor"};
1426 ValidateNumInputs(workloadInfo, descriptorName, 1);
1427 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1432 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2,
"input");
1433 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2,
"output");
1435 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1437 if (m_Parameters.m_Min > m_Parameters.m_Max)
1445 const std::string descriptorName{
"InstanceNormalizationQueueDescriptor"};
1447 ValidateNumInputs(workloadInfo, descriptorName, 1);
1448 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1458 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1461 std::vector<DataType> supportedTypes =
1467 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1468 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1473 const std::string descriptorName{
"L2NormalizationQueueDescriptor"};
1475 ValidateNumInputs(workloadInfo, descriptorName, 1);
1476 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1486 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1489 std::vector<DataType> supportedTypes =
1497 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1498 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1503 const std::string descriptorName{
"LogSoftmaxQueueDescriptor"};
1505 ValidateNumInputs(workloadInfo, descriptorName, 1);
1506 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1511 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1513 std::vector<DataType> supportedTypes =
1519 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1520 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1525 const std::string descriptorName{
"ConstantQueueDescriptor"};
1527 ValidateNumInputs(workloadInfo, descriptorName, 0);
1528 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1536 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName,
"constant",
"output");
1539 std::vector<DataType> supportedTypes =
1550 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1555 const std::string descriptorName{
"ReshapeQueueDescriptor"};
1557 ValidateNumInputs(workloadInfo, descriptorName, 1);
1558 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1563 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1566 std::vector<DataType> supportedTypes =
1576 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1577 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1582 const std::string descriptorName{
"SpaceToBatchNdQueueDescriptor"};
1584 ValidateNumInputs(workloadInfo, descriptorName, 1);
1585 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1590 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1591 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1593 if (m_Parameters.m_BlockShape.size() != 2)
1598 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1601 "dimensions as Block Shape.");
1606 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1607 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1611 const unsigned int inputWidth = inputShape[dimensionIndices.
GetWidthIndex()] +
1612 widthPad.first + widthPad.second;
1613 const unsigned int inputHeight = inputShape[dimensionIndices.
GetHeightIndex()] +
1614 heightPad.first + heightPad.second;
1616 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1618 const unsigned int numOutputElements = outputTensorInfo.
GetNumElements();
1620 if (numOutputElements != numInputElements)
1623 to_string(numInputElements) +
" after padding but output tensor has " +
1624 to_string(numOutputElements) +
" elements.");
1627 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1630 "divisible by Block Shape in all spatial dimensions");
1633 std::vector<DataType> supportedTypes =
1641 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1642 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1647 const std::string descriptorName{
"SpaceToDepthQueueDescriptor"};
1649 ValidateNumInputs(workloadInfo, descriptorName, 1);
1650 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1655 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
1656 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
1658 std::vector<DataType> supportedTypes =
1666 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1667 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1669 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
1671 if (m_Parameters.m_BlockSize == 0)
1677 const unsigned int wIndex = dimensionIndices.
GetWidthIndex();
1682 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1685 "by block size in all spatial dimensions");
1689 if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
1692 "must be divisible by the square of block size." );
1698 const std::string descriptorName{
"FloorQueueDescriptor"};
1700 ValidateNumInputs(workloadInfo, descriptorName, 1);
1701 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1706 std::vector<DataType> supportedTypes =
1713 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1715 if (inputTensorInfo != outputTensorInfo)
1725 const std::string descriptorName{
"LstmQueueDescriptor"};
1737 std::vector<DataType> supportedTypes =
1745 ValidateDataTypes(workloadInfo.
m_InputTensorInfos[0], supportedTypes, descriptorName);
1754 "input_" + std::to_string(i));
1761 "LstmQueueDescriptor",
1763 "output_" + std::to_string(i));
1769 if (m_Parameters.m_ClippingThresCell < 0.0f)
1773 if (m_Parameters.m_ClippingThresProj < 0.0f)
1782 ValidatePointer(m_InputToOutputWeights,
"Null pointer check",
"InputToOutputWeights");
1783 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1784 ValidatePointer(m_RecurrentToOutputWeights,
"Null pointer check",
"RecurrentToOutputWeights");
1785 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1788 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[0], 2, (n_batch * n_input),
1789 descriptorName +
" input_0");
1791 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[1], 2, (n_batch * n_output),
1792 descriptorName +
" input_1");
1794 ValidateTensorNumDimNumElem(workloadInfo.
m_InputTensorInfos[2], 2, (n_batch * n_cell),
1795 descriptorName +
" input_2");
1797 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1798 ValidateTensorNumDimNumElem(workloadInfo.
m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1799 descriptorName +
" output_0");
1802 descriptorName +
" output_1");
1805 descriptorName +
" output_2");
1808 descriptorName +
" output_3");
1812 if ( m_InputToInputWeights )
1814 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1815 (n_cell * n_input),
"InputLayerNormWeights");
1818 ValidatePointer(m_InputToForgetWeights,
"Null pointer check",
"InputToForgetWeights");
1819 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1820 (n_cell * n_input),
"InputToForgetWeights");
1822 ValidatePointer(m_InputToCellWeights,
"Null pointer check",
"InputToCellWeights");
1823 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1824 (n_cell * n_input),
"InputToCellWeights");
1826 if ( m_RecurrentToInputWeights )
1828 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1829 (n_cell * n_output),
"RecurrentToInputWeights");
1832 ValidatePointer(m_RecurrentToForgetWeights,
"Null pointer check",
"RecurrentToForgetWeights");
1833 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1834 (n_cell * n_output),
"RecurrentToForgetWeights");
1836 ValidatePointer(m_RecurrentToCellWeights,
"Null pointer check",
"RecurrentToCellWeights");
1837 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1838 (n_cell * n_output),
"RecurrentToCellWeights");
1842 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1843 !m_Parameters.m_CifgEnabled) ||
1844 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1845 m_Parameters.m_CifgEnabled));
1846 if (!cifg_weights_all_or_none)
1849 "RecurrentToInputWeights must either both be present (regular LSTM) " 1850 "or both not present (CIFG-LSTM). In addition CifgEnable must be set " 1854 if ( m_CellToInputWeights )
1856 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1857 n_cell,
"CellToInputWeights");
1859 if ( m_CellToForgetWeights )
1861 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1862 n_cell,
"CellToForgetWeights");
1864 if ( m_CellToOutputWeights )
1866 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1867 n_cell,
"CellToOutputWeights");
1871 bool peephole_weights_all_or_none =
1872 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1873 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1874 || ( !m_CellToInputWeights && !m_CellToForgetWeights
1875 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1876 if (!peephole_weights_all_or_none)
1882 if (m_Parameters.m_CifgEnabled)
1884 if (m_InputGateBias)
1891 if (!m_InputGateBias)
1894 "must be present.");
1896 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1897 n_cell,
"InputGateBias");
1900 ValidatePointer(m_ForgetGateBias,
"Null pointer check",
"ForgetGateBias");
1901 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell,
"ForgetGateBias");
1903 ValidatePointer(m_CellBias,
"Null pointer check",
"CellBias");
1904 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell,
"CellBias");
1906 ValidatePointer(m_OutputGateBias,
"Null pointer check",
"OutputGateBias");
1907 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell,
"OutputGateBias");
1909 if (m_ProjectionWeights)
1911 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1912 (n_cell * n_output),
"ProjectionWeights");
1914 if (m_ProjectionBias)
1916 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output,
"ProjectionBias");
1923 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1924 !m_Parameters.m_ProjectionEnabled)
1925 || (m_ProjectionWeights && !m_ProjectionBias &&
1926 m_Parameters.m_ProjectionEnabled)
1927 || (m_ProjectionWeights && m_ProjectionBias &&
1928 m_Parameters.m_ProjectionEnabled));
1929 if (!projecton_tensors_consistent)
1938 if (m_InputLayerNormWeights)
1940 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"InputLayerNormWeights");
1942 if (m_ForgetLayerNormWeights)
1944 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell,
"ForgetLayerNormWeights");
1946 if (m_CellLayerNormWeights)
1948 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell,
"CellLayerNormWeights");
1950 if (m_OutputLayerNormWeights)
1952 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"OutputLayerNormWeights");
1955 if (m_Parameters.m_LayerNormEnabled)
1957 if (!m_Parameters.m_CifgEnabled)
1959 if (!m_InputLayerNormWeights)
1962 "disabled but InputLayerNormWeights are not present");
1964 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1965 1, n_cell,
"InputLayerNormWeights");
1967 else if (m_InputLayerNormWeights)
1973 ValidatePointer(m_ForgetLayerNormWeights,
"Null pointer check layer normalisation enabled",
1974 "ForgetLayerNormWeights");
1975 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell,
"ForgetLayerNormWeights");
1977 ValidatePointer(m_OutputLayerNormWeights,
"Null pointer check layer normalisation enabled",
1978 "OutputLayerNormWeights");
1979 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell,
"OutputLayerNormWeights");
1981 ValidatePointer(m_CellLayerNormWeights,
"Null pointer check layer normalisation enabled",
1982 "CellLayerNormWeights");
1983 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell,
"CellLayerNormWeights");
1985 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1988 "normalisation weights are present.");
1994 const std::string descriptorName{
"ConvertFp32ToFp16QueueDescriptor"};
1996 ValidateNumInputs(workloadInfo, descriptorName, 1);
1997 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2012 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2017 const std::string descriptorName{
"ConvertFp16ToFp32QueueDescriptor"};
2019 ValidateNumInputs(workloadInfo, descriptorName, 1);
2020 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2035 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2040 const std::string descriptorName{
"DivisionQueueDescriptor"};
2042 ValidateNumInputs(workloadInfo, descriptorName, 2);
2043 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2049 std::vector<DataType> supportedTypes =
2057 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2058 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2059 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2061 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2071 const std::string descriptorName{
"SubtractionQueueDescriptor"};
2073 ValidateNumInputs(workloadInfo, descriptorName, 2);
2074 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2080 std::vector<DataType> supportedTypes =
2088 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2089 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2090 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2092 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2102 const std::string descriptorName{
"MaximumQueueDescriptor"};
2104 ValidateNumInputs(workloadInfo, descriptorName, 2);
2105 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2111 std::vector<DataType> supportedTypes =
2121 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2122 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2123 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2125 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2135 const std::string descriptorName{
"MeanQueueDescriptor"};
2137 ValidateNumInputs(workloadInfo, descriptorName, 1);
2138 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2143 std::vector<DataType> supportedTypes =
2153 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2154 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2156 if (m_Parameters.m_KeepDims)
2158 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.
GetNumDimensions(),
"output");
2160 else if (m_Parameters.m_Axis.empty())
2162 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1,
"output");
2166 unsigned int outputDim =
2167 inputTensorInfo.
GetNumDimensions() - boost::numeric_cast<
unsigned int>(m_Parameters.m_Axis.size());
2168 ValidateTensorNumDimensions(outputTensorInfo,
2170 outputDim > 0 ? outputDim : 1,
2177 const std::string descriptorName{
"PadQueueDescriptor"};
2179 ValidateNumInputs(workloadInfo, descriptorName, 1);
2180 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2186 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.
GetNumDimensions(),
"output");
2191 "as there are dimensions in the input tensor that is " +
2193 " not " + std::to_string(m_Parameters.m_PadList.size()) +
" entries.");
2199 const std::string descriptorName{
"QuantizeQueueDescriptor"};
2201 ValidateNumInputs(workloadInfo, descriptorName, 1);
2202 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2207 std::vector<DataType> supportedTypes =
2217 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2227 const std::string descriptorName{
"BatchToSpaceNdQueueDescriptor"};
2229 ValidateNumInputs(workloadInfo, descriptorName, 1);
2230 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2235 std::vector<DataType> supportedTypes =
2243 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2244 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2249 const std::string descriptorName{
"StridedSliceQueueDescriptor"};
2251 ValidateNumInputs(workloadInfo, descriptorName, 1);
2252 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2257 std::vector<DataType> supportedTypes =
2265 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2266 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2268 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2277 if (m_Parameters.m_Begin.size() != rank)
2282 if (m_Parameters.m_End.size() != rank)
2287 if (m_Parameters.m_Stride.size() != rank)
2293 for (
auto& stride : m_Parameters.m_Stride)
2304 const std::string descriptorName{
"MinimumQueueDescriptor"};
2306 ValidateNumInputs(workloadInfo, descriptorName, 2);
2307 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2313 std::vector<DataType> supportedTypes =
2322 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2323 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2324 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2326 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2336 const std::string descriptorName{
"DebugQueueDescriptor"};
2338 ValidateNumInputs(workloadInfo, descriptorName, 1);
2339 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2344 const std::string descriptorName{
"EqualQueueDescriptor"};
2346 ValidateNumInputs(workloadInfo, descriptorName, 2);
2347 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2353 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2368 const std::string descriptorName{
"GreaterQueueDescriptor"};
2370 ValidateNumInputs(workloadInfo, descriptorName, 2);
2371 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2377 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2392 const std::string descriptorName{
"RsqrtQueueDescriptor"};
2394 ValidateNumInputs(workloadInfo, descriptorName, 1);
2395 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2400 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2402 std::vector<DataType> supportedTypes =
2410 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2411 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2416 const std::string descriptorName{
"GatherQueueDescriptor"};
2418 ValidateNumInputs(workloadInfo, descriptorName, 2);
2419 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2430 std::vector<DataType> supportedTypes =
2438 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2440 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2443 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim,
"output");
2448 const std::string& descriptorName{
"DetectionPostProcessQueueDescriptor"};
2450 ValidateNumInputs(workloadInfo, descriptorName, 2);
2458 if (m_Anchors ==
nullptr)
2472 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3,
"box encodings");
2473 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3,
"scores");
2474 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2,
"anchors");
2476 const std::vector<DataType> supportedInputTypes =
2484 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2485 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2486 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2488 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3,
"detection boxes");
2489 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2,
"detection scores");
2490 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2,
"detection classes");
2491 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1,
"num detections");
2494 ValidateTensorDataType(detectionBoxesInfo,
DataType::Float32, descriptorName,
"detection boxes");
2495 ValidateTensorDataType(detectionScoresInfo,
DataType::Float32, descriptorName,
"detection scores");
2496 ValidateTensorDataType(detectionClassesInfo,
DataType::Float32, descriptorName,
"detection classes");
2497 ValidateTensorDataType(numDetectionsInfo,
DataType::Float32, descriptorName,
"num detections");
2499 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2502 "must be positive and less than or equal to 1.");
2505 if (scoresInfo.
GetShape()[2] != m_Parameters.m_NumClasses + 1)
2508 "should be equal to number of classes + 1.");
2514 const std::string& descriptorName{
"DequantizeQueueDescriptor"};
2516 ValidateNumInputs(workloadInfo, descriptorName, 1);
2517 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2527 std::vector<DataType> supportedTypes =
2533 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2538 const std::string& descriptorName{
"MergeQueueDescriptor"};
2540 ValidateNumInputs(workloadInfo, descriptorName, 2);
2541 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2547 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
2548 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName,
"input_0",
"output");
2550 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName,
"input_0",
"input_1");
2551 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName,
"input_0",
"output");
2556 const std::string& descriptorName{
"SwitchQueueDescriptor"};
2558 ValidateNumInputs(workloadInfo, descriptorName, 2);
2559 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2567 std::vector<DataType> supportedTypes =
2574 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2575 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2577 ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2578 ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2580 ValidateTensorShapesMatch(inputTensorInfo0,
2586 ValidateTensorShapesMatch(inputTensorInfo0,
2600 const std::string& descriptorName{
"PreluQueueDescriptor"};
2602 ValidateNumInputs(workloadInfo, descriptorName, 2);
2603 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2609 std::vector<DataType> supportedTypes
2617 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2618 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2620 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2622 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName,
"input",
"alpha");
2623 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"ouptut");
2625 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2635 const std::string descriptorName{
"TransposeConvolution2dQueueDescriptor"};
2637 ValidateNumInputs(workloadInfo, descriptorName, 1);
2638 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2643 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4,
"input");
2644 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4,
"output");
2646 ValidatePointer(m_Weight, descriptorName,
"weight");
2648 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2649 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4,
"weight");
2651 ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);
2654 if (m_Parameters.m_BiasEnabled)
2656 ValidatePointer(m_Bias, descriptorName,
"bias");
2658 optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
2662 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2665 ValidatePerAxisQuantization(inputTensorInfo,
2668 optionalBiasTensorInfo,
2671 std::vector<DataType> supportedTypes =
2679 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2680 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2685 const std::string descriptorName{
"QuantizedLstmQueueDescriptor"};
2688 ValidateNumInputs(workloadInfo, descriptorName, 3);
2689 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2699 std::vector<DataType> inputOutputSupportedTypes =
2704 std::vector<DataType> cellStateSupportedTypes =
2709 std::vector<DataType> weightsSupportedTypes =
2714 std::vector<DataType> biasSupportedTypes =
2720 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2721 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2722 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2724 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2725 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2728 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
2729 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2730 "outputStateIn",
"outputStateOut");
2731 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
2734 ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName,
"input",
"outputStateIn");
2735 ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName,
"input",
"outputStateOut");
2736 ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName,
"cellStateIn",
"cellStateOut");
2739 const uint32_t numBatches = inputInfo.GetShape()[0];
2740 const uint32_t inputSize = inputInfo.GetShape()[1];
2741 const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2744 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName +
" input");
2745 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName +
" cellStateIn");
2746 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateIn");
2747 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" cellStateOut");
2748 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName +
" outputStateOut");
2751 ValidatePointer(m_InputToInputWeights, descriptorName,
"InputToInputWeights");
2752 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2753 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize),
" InputToInputWeights");
2755 ValidatePointer(m_InputToForgetWeights, descriptorName,
"InputToForgetWeights");
2756 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2757 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize),
" InputToForgetWeights");
2759 ValidatePointer(m_InputToCellWeights, descriptorName,
"InputToCellWeights");
2760 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2761 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize),
" InputToCellWeights");
2763 ValidatePointer(m_InputToOutputWeights, descriptorName,
"InputToOutputWeights");
2764 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2765 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize),
" InputToOutputWeights");
2767 ValidatePointer(m_RecurrentToInputWeights, descriptorName,
"RecurrentToInputWeights");
2768 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2769 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToInputWeights");
2771 ValidatePointer(m_RecurrentToForgetWeights, descriptorName,
"RecurrentToForgetWeights");
2772 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2773 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2774 " RecurrentToForgetWeights");
2776 ValidatePointer(m_RecurrentToCellWeights, descriptorName,
"RecurrentToCellWeights");
2777 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2778 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToCellWeights");
2780 ValidatePointer(m_RecurrentToOutputWeights, descriptorName,
"RecurrentToOutputWeights");
2781 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2782 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize),
" RecurrentToCellWeights");
2785 ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2787 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2788 "inputToInputWeights",
"inputToForgetWeights");
2789 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2790 "inputToInputWeights",
"inputToCellWeights");
2791 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2792 "inputToInputWeights",
"inputToOutputWeights");
2794 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2795 "inputToInputWeights",
"recurrentToInputWeights");
2796 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2797 "inputToInputWeights",
"recurrentToForgeteights");
2798 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2799 "inputToInputWeights",
"recurrentToCellWeights");
2800 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2801 "inputToInputWeights",
"recurrentToOutputWeights");
2804 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2805 descriptorName,
"inputToInputWeights",
"inputToForgetWeights");
2806 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2807 descriptorName,
"inputToInputWeights",
"inputToCellWeights");
2808 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2809 descriptorName,
"inputToInputWeights",
"inputToOutputWeights");
2811 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2812 descriptorName,
"inputToInputWeights",
"recurrentToInputWeights");
2813 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2814 descriptorName,
"inputToInputWeights",
"recurrentToForgetWeights");
2815 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2816 descriptorName,
"inputToInputWeights",
"recurrentToCellWeights");
2817 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2818 descriptorName,
"inputToInputWeights",
"recurrentToOutputWeights");
2821 ValidatePointer(m_InputGateBias, descriptorName,
"InputGateBias");
2822 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2823 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize,
" InputGateBias");
2825 ValidatePointer(m_ForgetGateBias, descriptorName,
"ForgetGateBias");
2826 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2827 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize,
" ForgetGateBias");
2829 ValidatePointer(m_CellBias, descriptorName,
"CellBias");
2830 auto cellBiasInfo = m_CellBias->GetTensorInfo();
2831 ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize,
" CellBias");
2833 ValidatePointer(m_OutputGateBias, descriptorName,
"OutputGateBias");
2834 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2835 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize,
" OutputGateBias");
2838 ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2840 ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2841 "inputGateBias",
"forgetGateBias");
2842 ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2843 "inputGateBias",
"cellBias");
2844 ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2845 "inputGateBias",
"outputGateBias");
2848 ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2849 ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2850 ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2851 ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2856 const std::string descriptorName{
"AbsQueueDescriptor"};
2858 ValidateNumInputs(workloadInfo, descriptorName, 1);
2859 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2864 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2866 std::vector<DataType> supportedTypes =
2874 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2875 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2880 const std::string descriptorName{
"SliceQueueDescriptor"};
2882 ValidateNumInputs(workloadInfo, descriptorName, 1);
2883 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2888 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
2896 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank,
"output");
2899 if (m_Parameters.m_Begin.size() != rank)
2902 ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
2904 if (m_Parameters.m_Size.size() != rank)
2907 ": Length of size descriptor must equal rank " + std::to_string(rank));
2912 for (
unsigned int i = 0u; i < rank; ++i)
2914 if (m_Parameters.m_Size[i] != outputShape[i])
2923 for(
unsigned int i = 0u; i < rank; ++i)
2925 if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
2928 std::to_string(i) +
" exceeds input size.");
2935 const std::string descriptorName{
"DepthToSpaceQueueDescriptor"};
2937 ValidateNumInputs(workloadInfo, descriptorName, 1);
2938 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2943 ValidateTensorNumDimensions(inputInfo, descriptorName, 4,
"input");
2944 ValidateTensorNumDimensions(outputInfo, descriptorName, 4,
"output");
2946 std::vector<DataType> supportedTypes =
2954 ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
2955 ValidateDataTypes(outputInfo, supportedTypes, descriptorName);
2957 ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName,
"input",
"output");
2959 if (m_Parameters.m_BlockSize == 0)
2965 const unsigned int wIndex = dimensionIndices.
GetWidthIndex();
2970 if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 || outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
2973 "must be divisible by block size.");
2977 if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
2980 "must be divisible by the square of block size." );
2986 const std::string descriptorName{
"ComparisonQueueDescriptor"};
2988 ValidateNumInputs(workloadInfo, descriptorName, 2);
2989 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2995 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
3010 const std::string descriptorName{
"ElementwiseUnaryQueueDescriptor"};
3012 ValidateNumInputs(workloadInfo, descriptorName, 1);
3013 ValidateNumOutputs(workloadInfo, descriptorName, 1);
3018 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
3020 std::vector<DataType> supportedTypes =
3028 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
3029 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName,
"input",
"output");
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasPerAxisQuantization() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
int32_t GetQuantizationOffset() const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumDimensions() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
unsigned int GetNumDimensions() const
unsigned int GetHeightIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_OutputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetWidthIndex() const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr bool IsQuantizedType()
armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Optional< unsigned int > GetQuantizationDim() const
void Validate(const WorkloadInfo &workloadInfo) const
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantize, that the quantization parameters are the same...
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< TensorInfo > m_InputTensorInfos
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
bool HasMultipleQuantizationScales() const
std::vector< float > GetQuantizationScales() const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetChannelsIndex() const
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
unsigned int GetNumElements() const
void Validate(const WorkloadInfo &workloadInfo) const
DataType GetDataType() const
DataType GetBiasDataType(DataType inputDataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
std::vector< unsigned int > m_Origin
void Validate(const WorkloadInfo &workloadInfo) const
const TensorShape & GetShape() const
void Validate(const WorkloadInfo &workloadInfo) const
bool has_value() const noexcept
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr const char * GetDataTypeName(DataType dataType)
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
void Validate(const WorkloadInfo &workloadInfo) const
float GetQuantizationScale() const
void Validate(const WorkloadInfo &workloadInfo) const
constexpr bool IsQuantized8BitType(DataType dataType)
#define ARMNN_NO_DEPRECATE_WARN_END
std::vector< unsigned int > m_Origin