7 #include <half/half.hpp> 14 using Half = half_float::half;
23 switch (operation.type)
25 case OperationType::ABS:
26 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
27 case OperationType::ADD:
28 return ConvertAdd(operation, model, data);
29 case OperationType::ARGMAX:
30 return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
31 case OperationType::ARGMIN:
32 return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
33 case OperationType::AVERAGE_POOL_2D:
34 return ConvertAveragePool2d(operation, model, data);
35 case OperationType::BATCH_MATMUL:
36 return ConvertBatchMatMul(operation, model, data);
37 case OperationType::BATCH_TO_SPACE_ND:
38 return ConvertBatchToSpaceNd(operation, model, data);
39 case OperationType::CAST:
40 return ConvertCast(operation, model, data);
41 case OperationType::CONCATENATION:
42 return ConvertConcatenation(operation, model, data);
43 case OperationType::CONV_2D:
44 return ConvertConv2d(operation, model, data);
45 case OperationType::DEPTH_TO_SPACE:
46 return ConvertDepthToSpace(operation, model, data);
47 case OperationType::DEPTHWISE_CONV_2D:
48 return ConvertDepthwiseConv2d(operation, model, data);
49 case OperationType::DEQUANTIZE:
50 return ConvertDequantize(operation, model, data);
51 case OperationType::DIV:
52 return ConvertDiv(operation, model, data);
53 case OperationType::ELU:
54 return ConvertElu(operation, model, data);
55 case OperationType::EQUAL:
56 return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
57 case OperationType::EXP:
58 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
59 case OperationType::EXPAND_DIMS:
60 return ConvertExpandDims(operation, model, data);
61 case OperationType::FILL:
62 return ConvertFill(operation, model, data);
63 case OperationType::FLOOR:
64 return ConvertFloor(operation, model, data);
65 case OperationType::FULLY_CONNECTED:
66 return ConvertFullyConnected(operation, model, data);
67 case OperationType::GATHER:
68 return ConvertGather(operation, model, data);
69 case OperationType::GREATER:
70 return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
71 case OperationType::GREATER_EQUAL:
72 return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
73 case OperationType::GROUPED_CONV_2D:
74 return ConvertGroupedConv2d(operation, model, data);
75 case OperationType::HARD_SWISH:
76 return ConvertHardSwish(operation, model, data);
77 case OperationType::INSTANCE_NORMALIZATION:
78 return ConvertInstanceNormalization(operation, model, data);
79 case OperationType::L2_NORMALIZATION:
80 return ConvertL2Normalization(operation, model, data);
81 case OperationType::L2_POOL_2D:
82 return ConvertL2Pool2d(operation, model, data);
83 case OperationType::LESS:
84 return ConvertComparison(operation, model, data, ComparisonOperation::Less);
85 case OperationType::LESS_EQUAL:
86 return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
87 case OperationType::LOCAL_RESPONSE_NORMALIZATION:
88 return ConvertLocalResponseNormalization(operation, model, data);
89 case OperationType::LOG:
90 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
91 case OperationType::LOGICAL_AND:
92 return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
93 case OperationType::LOGICAL_NOT:
94 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
95 case OperationType::LOGICAL_OR:
96 return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
97 case OperationType::LOGISTIC:
98 return ConvertLogistic(operation, model, data);
99 case OperationType::LOG_SOFTMAX:
100 return ConvertLogSoftmax(operation, model, data);
101 case OperationType::LSTM:
102 return ConvertLstm(operation, model, data);
103 case OperationType::MAX_POOL_2D:
104 return ConvertMaxPool2d(operation, model, data);
105 case OperationType::MAXIMUM:
106 return ConvertMaximum(operation, model, data);
107 case OperationType::MEAN:
108 return ConvertMean(operation, model, data);
109 case OperationType::MINIMUM:
110 return ConvertMinimum(operation, model, data);
111 case OperationType::MUL:
112 return ConvertMul(operation, model, data);
113 case OperationType::NEG:
114 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
115 case OperationType::NOT_EQUAL:
116 return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
117 case OperationType::PAD:
118 return ConvertPad(operation, model, data);
119 case OperationType::PAD_V2:
120 return ConvertPadV2(operation, model, data);
121 case OperationType::PRELU:
122 return ConvertPrelu(operation, model, data);
123 case OperationType::QUANTIZE:
124 return ConvertQuantize(operation, model, data);
125 case OperationType::QUANTIZED_LSTM:
126 return ConvertQuantizedLstm(operation, model, data);
127 case OperationType::QUANTIZED_16BIT_LSTM:
128 return ConvertQuantized16BitLstm(operation, model, data);
129 case OperationType::RANK:
130 return ConvertRank(operation, model, data);
131 case OperationType::REDUCE_MAX:
133 case OperationType::REDUCE_MIN:
135 case OperationType::REDUCE_SUM:
137 case OperationType::RELU:
138 return ConvertReLu(operation, model, data);
139 case OperationType::RELU1:
140 return ConvertReLu1(operation, model, data);
141 case OperationType::RELU6:
142 return ConvertReLu6(operation, model, data);
143 case OperationType::RESHAPE:
144 return ConvertReshape(operation, model, data);
145 case OperationType::RESIZE_BILINEAR:
146 return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
147 case OperationType::RESIZE_NEAREST_NEIGHBOR:
148 return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
149 case OperationType::RSQRT:
150 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
151 case OperationType::SIN:
152 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
153 case OperationType::SOFTMAX:
154 return ConvertSoftmax(operation, model, data);
155 case OperationType::SPACE_TO_BATCH_ND :
156 return ConvertSpaceToBatchNd(operation, model, data);
157 case OperationType::SPACE_TO_DEPTH:
158 return ConvertSpaceToDepth(operation, model, data);
159 case OperationType::SQRT:
160 return ConvertSqrt(operation, model, data);
161 case OperationType::SQUEEZE:
162 return ConvertSqueeze(operation, model, data);
163 case OperationType::STRIDED_SLICE:
164 return ConvertStridedSlice(operation, model, data);
165 case OperationType::SUB:
166 return ConvertSub(operation, model, data);
167 case OperationType::TRANSPOSE:
168 return ConvertTranspose(operation, model, data);
169 case OperationType::TRANSPOSE_CONV_2D:
170 return ConvertTransposeConv2d(operation, model, data);
171 case OperationType::TANH:
172 return ConvertTanH(operation, model, data);
174 VLOG(DRIVER) <<
"Operation type: " << operation.type <<
"is not supported in ArmnnDriver";
181 VLOG(DRIVER) <<
"Converter::ConvertAdd()";
187 return Fail(
"%s: Operation has invalid inputs", __func__);
191 ActivationFn activationFunction;
194 return Fail(
"%s: Operation has invalid inputs", __func__);
208 bool isSupported =
false;
222 validateFunc(outputInfo, isSupported);
236 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
237 if (!isReshapeSupported)
243 data,
nullptr, validateFunc, activationFunction);
246 bool Converter::ConvertArgMinMax(
const Operation& operation,
251 VLOG(DRIVER) <<
"Converter::ConvertArgMinMax()";
258 return Fail(
"%s: Operation has invalid inputs", __func__);
262 if (!
GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
264 return Fail(
"%s: Operation has invalid inputs. Failed to read axis.", __func__);
270 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
275 return Fail(
"%s: Axis must be in range [-n, n)", __func__);
281 return Fail(
"%s: Could not read output 0", __func__);
292 bool isSupported =
false;
297 IsArgMinMaxSupported,
311 validateFunc(outputInfo, isSupported);
320 assert(layer !=
nullptr);
329 VLOG(DRIVER) <<
"Converter::ConvertAveragePool2d()";
330 return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
335 VLOG(DRIVER) <<
"Converter::ConvertBatchMatMul()";
341 return Fail(
"%s: Operation has invalid inputs", __func__);
348 if (rankInput0 > 4 || rankInput0 < 2)
350 Fail(
"%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
354 if (rankInput1 > 4 || rankInput1 < 2)
356 Fail(
"%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
363 return Fail(
"%s: Operation has invalid inputs", __func__);
370 return Fail(
"%s: Operation has invalid inputs", __func__);
373 if (input0Type != input1Type)
375 return Fail(
"%s: Operation has invalid inputs (Inputs must have same OperandCode)", __func__);
381 return Fail(
"%s: Could not read output 0", __func__);
393 bool isSupported =
false;
397 IsBatchMatMulSupported,
408 validateFunc(outputInfo, isSupported);
422 assert(layer !=
nullptr);
431 VLOG(DRIVER) <<
"Converter::ConvertBatchToSpaceNd()";
435 return Fail(
"%s: Operation has invalid inputs", __func__);
441 return Fail(
"%s: Could not read output 0", __func__);
449 return Fail(
"%s: Could not read input 1", __func__);
453 std::vector<int32_t> block;
456 return Fail(
"%s: Input 1 has invalid values", __func__);
464 Fail(
"%s: Only inputs with rank equal to 4 are supported", __func__);
467 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){
return i < 1; }))
469 return Fail(
"%s: Block sizes for each spatial dimension of the input tensor must be" 470 " greater than or equal to 1", __func__);
474 batchToSpaceNdDesc.
m_BlockShape.assign(block.cbegin(), block.cend());
477 if (Is12OrLaterOperand(*output))
482 batchToSpaceNdDesc.
m_Crops = {{0, 0}, {0, 0}};
484 bool isSupported =
false;
498 validateFunc(outputInfo, isSupported);
512 assert(layer !=
nullptr);
520 VLOG(DRIVER) <<
"Converter::ConvertCast()";
526 return Fail(
"%s: Operation has invalid inputs", __func__);
532 return Fail(
"%s: Could not read output 0", __func__);
538 bool isSupported =
false;
552 validateFunc(outputInfo, isSupported);
565 assert(layer !=
nullptr);
571 bool Converter::ConvertComparison(
const Operation& operation,
576 VLOG(DRIVER) <<
"Converter::ConvertComparison()";
584 return Fail(
"%s: Operation has invalid inputs", __func__);
590 return Fail(
"%s: Could not read output 0", __func__);
599 bool isSupported =
false;
603 IsComparisonSupported,
614 validateFunc(outputInfo, isSupported);
627 assert(layer !=
nullptr);
629 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
630 if (!isReshapeSupported)
647 VLOG(DRIVER) <<
"Converter::ConvertConcatenation()";
650 if (operation.inputs.size() <= 1)
652 return Fail(
"%s: Operation has insufficient arguments", __func__);
656 const std::size_t numInputTensors = operation.inputs.size() - 1;
659 if (!
GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
661 return Fail(
"%s: Operation has invalid inputs", __func__);
667 return Fail(
"%s: Operation has no outputs", __func__);
683 if (concatDim >= static_cast<int32_t>(outputShape.
GetNumDimensions()) || concatDim < 0)
685 return Fail(
"%s: Operation has invalid concat axis: %d", __func__, concatDim);
688 std::vector<LayerInputHandle> inputHandles;
689 std::vector<armnn::TensorShape> inputShapes;
691 inputHandles.reserve(numInputTensors);
692 inputShapes.reserve(numInputTensors);
694 bool inputsHaveBeenReshaped =
false;
695 unsigned int tensorDimensionsAdded = 0;
696 for (uint32_t i = 0; i < numInputTensors; ++i)
701 return Fail(
"%s: Operation has invalid inputs", __func__);
705 if (!operandInputHandle.
IsValid())
707 return Fail(
"%s: Operation has invalid inputs", __func__);
713 return Fail(
"%s: Operands with rank 0 are not supported", __func__);
716 if (RequiresReshape(operandShape))
718 inputsHaveBeenReshaped =
true;
726 tensorDimensionsAdded = 1;
731 tensorDimensionsAdded = 2;
737 bool isSupported =
false;
753 operandShape = reshapeInfo.
GetShape();
757 inputShapes.emplace_back(operandShape);
758 inputHandles.emplace_back(operandInputHandle);
760 if (!inputHandles.back().IsValid())
762 return Fail(
"%s: Operation has invalid inputs", __func__);
766 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
768 if (inputsHaveBeenReshaped)
771 concatDim += tensorDimensionsAdded;
774 if (tensorDimensionsAdded == 1)
785 else if (tensorDimensionsAdded == 2)
800 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
801 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
802 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
807 if (!isDynamicTensor)
818 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
833 }
catch (std::exception&
error)
835 return Fail(
"%s: Error preparing concat descriptor. %s", __func__, error.what());
840 if (!isDynamicTensor)
842 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
844 return Fail(
"%s: Error validating the output shape for concat", __func__);
848 std::vector<const armnn::TensorInfo*> inputTensorInfos;
849 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
852 bool isSupported =
false;
855 outputInfo, concatDescriptor);
858 if (!isDynamicTensor)
860 validateFunc(outputInfo, isSupported);
873 assert(layer !=
nullptr);
877 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
878 for (
int i = 0; i < numInputSlots; ++i)
885 auto transposeOutputShape = [&](){
890 permutationPair.second);
893 IsTransposeSupported,
905 permutationPair.second);
906 layer = &deswizzleLayer;
911 if (needPermute && !isDynamicTensor)
913 transposeOutputShape();
916 if (inputsHaveBeenReshaped)
922 if (!ValidateConcatOutputShape(inputShapes,
926 return Fail(
"%s: Error validating the output shape for concat", __func__);
928 transposeOutputShape();
933 if (tensorDimensionsAdded == 1)
938 else if (tensorDimensionsAdded == 2)
948 auto validateReshapeFunc = [&](
const armnn::TensorInfo& afterConcatInfo,
bool& isSupported){
960 validateReshapeFunc(afterConcatInfo, isSupported);
978 validateReshapeFunc);
986 VLOG(DRIVER) <<
"Converter::ConvertConv2d()";
991 return Fail(
"%s: Operation has invalid inputs", __func__);
997 return Fail(
"%s: Could not read output 0", __func__);
1007 bool implicitPadding = operation.inputs.size() == 7
1008 || (operation.inputs.size() >= 8
1011 if (implicitPadding)
1015 else if (operation.inputs.size() >= 10)
1029 return Fail(
"%s: Operation has unsupported weights OperandLifeTime", __func__);
1038 return Fail(
"%s: Operation has invalid inputs", __func__);
1044 return Fail(
"%s: Operation has invalid inputs", __func__);
1051 ActivationFn activation;
1052 if (implicitPadding)
1054 ::android::nn::PaddingScheme paddingScheme;
1061 return Fail(
"%s: Operation has invalid inputs (implicit padding)", __func__);
1065 unsigned int widthIndex = dataLayoutIndexed.
GetWidthIndex();
1067 const uint32_t kernelX = weightsInfo.
GetShape()[widthIndex];
1068 const uint32_t kernelY = weightsInfo.
GetShape()[heightIndex];
1069 const uint32_t inputX = inputInfo.
GetShape()[widthIndex];
1070 const uint32_t inputY = inputInfo.
GetShape()[heightIndex];
1076 else if (operation.inputs.size() >= 10)
1088 return Fail(
"%s: Operation has invalid inputs (explicit padding)", __func__);
1093 return Fail(
"%s: Unsupported number of operation inputs", __func__);
1099 bool requiresValidation =
true;
1108 requiresValidation =
false;
1109 VLOG(DRIVER) <<
"Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
1112 auto validateFunc = [&](
const armnn::TensorInfo& outputInfo,
bool& isSupported) {
1124 if (requiresValidation)
1126 VLOG(DRIVER) <<
"Converter::ConvertConv2d(): Requires Validation!";
1127 bool isSupported =
false;
1130 validateFunc(outputInfo, isSupported);
1147 return Fail(
"%s: AddConvolution2dLayer failed", __func__);
1159 VLOG(DRIVER) <<
"Converter::ConvertDepthToSpace()";
1164 return Fail(
"%s: Operation has invalid inputs", __func__);
1171 return Fail(
"%s: Only inputs with rank 4 are supported", __func__);
1177 return Fail(
"%s: Could not read output 0", __func__);
1187 return Fail(
"%s: Block size must be at least 1 in all dimensions");
1191 if (Is12OrLaterOperand(*output))
1196 bool isSupported =
false;
1200 IsDepthToSpaceSupported,
1210 validateFunc(outputInfo, isSupported);
1223 assert(layer !=
nullptr);
1231 VLOG(DRIVER) <<
"Converter::ConvertDepthwiseConv2d()";
1237 return Fail(
"%s: Operation has invalid inputs", __func__);
1244 return Fail(
"%s: Could not read output 0", __func__);
1254 if (!weightsOperand)
1256 return Fail(
"%s: Could not read weights", __func__);
1261 if (weightsOperand->dimensions[0] != 1)
1263 return Fail(
"%s: Filter operand dimension 0 is invalid, should be 1", __func__);
1270 bool implicitPadding = operation.inputs.size() == 8
1271 || (operation.inputs.size() >= 9
1275 const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
1279 unsigned int widthIndex = dataLayoutIndexed.
GetWidthIndex();
1285 return Fail(
"%s: Operation has invalid inputs", __func__);
1291 return Fail(
"%s: Could not read bias", __func__);
1297 return Fail(
"%s: Operation has invalid inputs", __func__);
1304 ActivationFn activation;
1305 if (implicitPadding)
1307 ::android::nn::PaddingScheme paddingScheme;
1314 return Fail(
"%s: Operation has invalid inputs (implicit padding)", __func__);
1317 const uint32_t kernelX = weightsInfo.
GetShape()[2];
1318 const uint32_t kernelY = weightsInfo.
GetShape()[1];
1319 const uint32_t inputX = inputInfo.
GetShape()[widthIndex];
1320 const uint32_t inputY = inputInfo.
GetShape()[heightIndex];
1325 else if (operation.inputs.size() >= 11)
1337 return Fail(
"%s: Operation has invalid inputs (explicit padding)", __func__);
1342 return Fail(
"%s: Unsupported number of operation inputs", __func__);
1348 bool requiresValidation =
true;
1354 requiresValidation =
false;
1355 VLOG(DRIVER) <<
"Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
1358 auto validateFunc = [&](
const armnn::TensorInfo& outputInfo,
bool& isSupported) {
1370 if (requiresValidation)
1372 VLOG(DRIVER) <<
"Converter::ConvertDepthwiseConv2d(): Requires Validation!";
1373 bool isSupported =
false;
1376 validateFunc(outputInfo, isSupported);
1393 return Fail(
"%s: AddDepthwiseConvolution2dLayer failed", __func__);
1407 VLOG(DRIVER) <<
"Converter::ConvertDequantize()";
1412 return Fail(
"%s: Operation has invalid input", __func__);
1419 return Fail(
"%s: Operation has quantization dimension different than 0", __func__);
1425 return Fail(
"%s: Operation has invalid outputs", __func__);
1430 bool isSupported =
false;
1447 validateFunc(outputInfo, isSupported);
1456 assert(layer !=
nullptr);
1464 VLOG(DRIVER) <<
"Converter::ConvertDiv()";
1471 return Fail(
"%s: Operation has invalid inputs", __func__);
1476 ActivationFn activationFunction;
1479 return Fail(
"%s: Operation has invalid inputs", __func__);
1485 return Fail(
"%s: Could not read output 0", __func__);
1490 bool isSupported =
false;
1504 validateFunc(outputInfo, isSupported);
1518 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1519 if (!isReshapeSupported)
1525 data,
nullptr, validateFunc, activationFunction);
1528 bool Converter::ConvertElementwiseUnary(
const Operation& operation,
1533 VLOG(DRIVER) <<
"Converter::ConvertElementwiseUnary()";
1540 return Fail(
"%s: Operation has invalid input", __func__);
1546 return Fail(
"%s: Could not read output 0", __func__);
1554 bool isSupported =
false;
1559 IsElementwiseUnarySupported,
1569 validateFunc(outputInfo, isSupported);
1582 assert(layer !=
nullptr);
1590 VLOG(DRIVER) <<
"Converter::ConvertElu()";
1595 return Fail(
"%s: Operation has invalid inputs", __func__);
1602 return Fail(
"%s: Operation has invalid inputs", __func__);
1609 if (inputType == OperandType::TENSOR_FLOAT16)
1613 if (!
GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
1615 return Fail(
"%s: Operation has invalid inputs (FLOAT16)", __func__);
1618 desc.
m_A =
static_cast<float>(alpha);
1620 else if (inputType == OperandType::TENSOR_FLOAT32)
1624 return Fail(
"%s: Operation has invalid inputs (FLOAT32)", __func__);
1629 return Fail(
"%s: Unsupported input tensor type: %d", __func__, inputType);
1637 VLOG(DRIVER) <<
"Converter::ConvertExpandDims()";
1643 return Fail(
"%s: Operation has invalid input", __func__);
1649 return Fail(
"%s: Operation has invalid output", __func__);
1655 if (!
GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
1657 return Fail(
"%s: failed to get axis input value", __func__);
1666 catch (
const std::exception& e)
1668 return Fail(
"%s: %s", __func__, e.what());
1674 bool isSupported =
false;
1688 if (targetShape != outputInfo.
GetShape())
1690 return Fail(
"%s: Shape of the output operand does not match the resolved expanded shape", __func__);
1692 validateFunc(outputInfo, isSupported);
1705 assert(layer !=
nullptr);
1713 VLOG(DRIVER) <<
"Converter::ConvertFill()";
1717 return Fail(
"%s: Operation has invalid inputs", __func__);
1723 return Fail(
"%s: Could not read output", __func__);
1730 return Fail(
"%s: Dynamic output tensors are not supported", __func__);
1737 if (outputType == OperandType::TENSOR_FLOAT16)
1741 if (!
GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
1743 return Fail(
"%s: Operation has invalid inputs %d", __func__, outputType);
1746 descriptor.
m_Value =
static_cast<float>(value);
1748 else if (outputType == OperandType::TENSOR_FLOAT32)
1752 return Fail(
"%s: Operation has invalid inputs %d", __func__, outputType);
1755 else if (outputType == OperandType::TENSOR_INT32)
1759 if (!
GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
1761 return Fail(
"%s: Operation has invalid inputs %d", __func__, outputType);
1764 descriptor.
m_Value =
static_cast<float>(value);
1768 return Fail(
"%s: Unsupported input tensor type: %d", __func__, outputType);
1771 bool isSupported =
false;
1785 assert(layer !=
nullptr);
1793 VLOG(DRIVER) <<
"Converter::ConvertFloor()";
1797 return Fail(
"%s: Operation has invalid inputs", __func__);
1803 return Fail(
"%s: Operation has invalid outputs", __func__);
1808 bool isSupported =
false;
1821 validateFunc(outputInfo, isSupported);
1834 assert(layer !=
nullptr);
1842 VLOG(DRIVER) <<
"Converter::ConvertFullyConnected()";
1846 return Fail(
"%s: Operation has invalid inputs", __func__);
1852 return Fail(
"%s: Could not read output 0", __func__);
1860 if (!weightsOperand)
1862 return Fail(
"%s: Could not read weights", __func__);
1870 return Fail(
"%s: Operation has invalid inputs", __func__);
1877 return Fail(
"%s: Could not read bias", __func__);
1885 return Fail(
"%s: Operation has invalid inputs", __func__);
1894 catch (
const std::exception& e)
1896 return Fail(
"%s: %s", __func__, e.what());
1901 SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
1903 ActivationFn activationFunction;
1906 return Fail(
"%s: Operation has invalid inputs", __func__);
1914 bool isSupported =
false;
1917 if (!VerifyFullyConnectedShapes(reshapedInfo.
GetShape(),
1922 isSupported =
false;
1923 Fail(
"%s: Expected outputShape does not match actual outputShape", __func__);
1940 validateFunc(outputInfo, isSupported);
1961 assert(reshapeLayer !=
nullptr);
1976 data,
nullptr, validateFunc, activationFunction);
1981 VLOG(DRIVER) <<
"Converter::ConvertGather()";
1986 return Fail(
"%s: Operation has invalid input", __func__);
1993 return Fail(
"%s: Operation has invalid indices", __func__);
2000 return Fail(
"%s: Operation has invalid output", __func__);
2004 if (outputDimensions != inputDimensions + indicesDimensions - 1)
2006 return Fail(
"%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
2007 __func__, outputDimensions, inputDimensions, indicesDimensions);
2011 if (!
GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
2013 return Fail(
"%s: Operation has invalid or unsupported axis operand", __func__);
2015 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2017 return Fail(
"%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d))", __func__, axis,
2018 inputDimensions, inputDimensions);
2024 bool isSupported =
false;
2039 validateFunc(outputInfo, isSupported);
2052 assert(layer !=
nullptr);
2061 VLOG(DRIVER) <<
"Converter::ConvertGroupedConv2d()";
2068 return Fail(
"%s: Operation has invalid inputs", __func__);
2075 return Fail(
"%s: Could not read output 0", __func__);
2081 if (operation.inputs.size() == 12)
2095 const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
2097 model, data, ohwiToOihw) :
2103 return Fail(
"%s: Operation has invalid inputs", __func__);
2108 SanitizeBiasQuantizationScale(biases.
GetInfo(), weights.
GetInfo(), inputInfo);
2117 const unsigned int heightIndex = dataLayoutIndexed.
GetHeightIndex();
2118 const unsigned int widthIndex = dataLayoutIndexed.
GetWidthIndex();
2125 ActivationFn activation;
2127 if (operation.inputs.size() == 12)
2135 !
GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
2138 return Fail(
"%s: Operation has invalid inputs (explicit padding)", __func__);
2142 else if (operation.inputs.size() == 9)
2144 ::android::nn::PaddingScheme paddingScheme;
2148 !
GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
2151 return Fail(
"%s: Operation has invalid inputs (implicit padding)", __func__);
2154 const uint32_t inputX = inputInfo.
GetShape()[widthIndex];
2155 const uint32_t inputY = inputInfo.
GetShape()[heightIndex];
2157 const uint32_t kernelX = weightsShape[widthIndex];
2158 const uint32_t kernelY = weightsShape[heightIndex];
2165 return Fail(
"%s: Unsupported number of operation inputs", __func__);
2169 const unsigned int outputChannels = weightsShape[0];
2171 const unsigned int channelsPerGroup = weightsShape[channelsIndex];
2172 const unsigned int channelMultiplier = outputChannels / numGroups;
2179 return Fail(
"%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
2182 if (outputChannels % numGroups != 0u)
2184 return Fail(
"%s: Output channels must be divisible by the number of groups", __func__);
2190 unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
2191 splitterDimSizes[channelsIndex] /= numGroups;
2199 std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
2202 for (
unsigned int group = 0u; group < numGroups; ++group)
2204 splitterDesc.
SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
2205 for (
unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
2207 splitterDesc.
SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
2211 bool isSupported =
false;
2217 splitterOutputInfos,
2227 return Fail(
"%s: Failed to add SplitterLayer", __func__);
2231 for (
unsigned int group = 0u; group < splitterLayer->
GetNumOutputSlots(); ++group)
2242 groupInputShape[channelsIndex] = channelsPerGroup;
2245 groupWeightsShape[0] /= channelMultiplier * numGroups;
2251 groupInputInfo.SetShape(groupInputShape);
2255 groupWeightsInfo.
SetShape(groupWeightsShape);
2259 groupBiasesInfo.
SetShape(groupBiasesShape);
2267 groupOutputShape[channelsIndex] = 1;
2269 groupOutputInfo.
SetShape(groupOutputShape);
2274 std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier,
nullptr);
2275 for (
unsigned int group = 0u; group < numGroups; ++group)
2277 for (
unsigned int m = 0u; m < channelMultiplier; ++m)
2279 auto index = group * channelMultiplier + m;
2281 const unsigned int weightsDataOffset = groupWeightsShape.
GetNumElements() * index * weightsDataTypeSize;
2282 const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
2289 std::vector<float>(weightsQuantScales.begin() + index,
2290 weightsQuantScales.begin() + index + groupWeightsShape[0]));
2295 std::vector<float>(biasesQuantScales.begin() + index,
2296 biasesQuantScales.begin() + index + groupWeightsShape[0]));
2301 static_cast<const void *>(reinterpret_cast<const char *>(weights.
GetMemoryArea()) +
2302 weightsDataOffset));
2304 static_cast<const void *>(reinterpret_cast<const char *>(biases.
GetMemoryArea()) +
2307 isSupported =
false;
2323 validateFunc(groupOutputInfo, isSupported);
2341 return Fail(
"%s: AddConvolution2dLayer failed", __func__);
2366 convLayers[index] = convLayer;
2376 for (
unsigned int group = 0u; group < numGroups; ++group)
2378 for (
unsigned int m = 0u; m < channelMultiplier; ++m)
2380 auto index = group * channelMultiplier + m;
2386 isSupported =
false;
2391 std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
2403 return Fail(
"%s: AddConcatLayer failed", __func__);
2406 for (
unsigned int group = 0u; group < numGroups; ++group)
2408 for (
unsigned int m = 0u; m < channelMultiplier; ++m)
2410 auto index = group * channelMultiplier + m;
2411 convLayers[index]->GetOutputSlot(0).Connect(concatLayer->
GetInputSlot(index));
2417 data,
nullptr,
nullptr, activation);
2422 VLOG(DRIVER) <<
"Converter::ConvertHardSwish()";
2424 desc.
m_Function = ActivationFunction::HardSwish;
2431 VLOG(DRIVER) <<
"Converter::ConvertInstanceNormalization()";
2436 return Fail(
"%s: Operation has an invalid input 0", __func__);
2442 return Fail(
"%s: Operation has an invalid output", __func__);
2451 return Fail(
"%s: Operation has invalid inputs", __func__);
2457 if (inputType == OperandType::TENSOR_FLOAT16)
2463 if (!
GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
2464 !
GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
2465 !
GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
2467 return Fail(
"%s: Operation has invalid inputs (FLOAT16)", __func__);
2470 desc.
m_Gamma =
static_cast<float>(fp16Gamma);
2471 desc.
m_Beta =
static_cast<float>(fp16Beta);
2472 desc.
m_Eps =
static_cast<float>(fp16Epsilon);
2474 else if (inputType == OperandType::TENSOR_FLOAT32)
2480 return Fail(
"%s: Operation has invalid inputs (FLOAT32)", __func__);
2485 return Fail(
"%s: Unsupported input tensor type: %d", __func__, inputType);
2490 bool isSupported =
false;
2494 IsInstanceNormalizationSupported,
2508 validateFunc(outputInfo, isSupported);
2524 VLOG(DRIVER) <<
"Converter::ConvertL2Normalization()";
2526 if (operation.inputs.size() != 1)
2528 return Fail(
"%s: Optional inputs are not supported", __func__);
2534 return Fail(
"%s: Operation has invalid inputs", __func__);
2540 return Fail(
"%s: Could not read output 0", __func__);
2548 return Fail(
"%s: Tensor Rank other than 4 is not supported", __func__);
2554 bool isSupported =
false;
2568 validateFunc(outputInfo, isSupported);
2581 assert(layer !=
nullptr);
2589 VLOG(DRIVER) <<
"Converter::ConvertL2Pool2d()";
2590 return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
2593 bool Converter::ConvertLocalResponseNormalization(
const Operation& operation,
2597 VLOG(DRIVER) <<
"Converter::ConvertLocalResponseNormalization()";
2599 if (operation.inputs.size() != 5)
2601 return Fail(
"%s: Optional inputs are not supported", __func__);
2607 return Fail(
"%s: Operation has invalid inputs", __func__);
2613 return Fail(
"%s: Could not read output 0", __func__);
2621 return Fail(
"%s: Tensor Rank other than 4 is not supported", __func__);
2635 return Fail(
"%s: Operation has invalid inputs", __func__);
2642 bool isSupported =
false;
2656 validateFunc(outputInfo, isSupported);
2670 assert(layer !=
nullptr);
2676 bool Converter::ConvertLogicalBinary(
const Operation& operation,
2681 VLOG(DRIVER) <<
"Converter::ConvertLogicalBinary()";
2682 VLOG(DRIVER) <<
"ConvertLogicalBinary()";
2690 return Fail(
"%s: Operation has invalid inputs", __func__);
2696 return Fail(
"%s: Could not read output 0", __func__);
2705 bool isSupported =
false;
2710 IsLogicalBinarySupported,
2721 validateFunc(outputInfo, isSupported);
2734 assert(layer !=
nullptr);
2736 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2737 if (!isReshapeSupported)
2747 VLOG(DRIVER) <<
"Converter::ConvertLogistic()";
2756 VLOG(DRIVER) <<
"Converter::ConvertLogSoftmax()";
2761 return Fail(
"%s: Failed to read input 0", __func__);
2767 return Fail(
"%s: Failed to read output", __func__);
2776 return Fail(
"%s: Operation has invalid inputs", __func__);
2782 if (inputType == OperandType::TENSOR_FLOAT16)
2785 if (!
GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
2787 return Fail(
"%s: Failed to read input 1 (FLOAT16)", __func__);
2790 descriptor.
m_Beta =
static_cast<float>(fp16Beta);
2792 else if (inputType == OperandType::TENSOR_FLOAT32)
2796 return Fail(
"%s: Failed to read input 1 (FLOAT32)", __func__);
2801 return Fail(
"%s: Unsupported input tensor type: %d", __func__, inputType);
2807 return Fail(
"%s: Failed to read input 2", __func__);
2810 bool isSupported =
false;
2814 IsLogSoftmaxSupported,
2828 validateFunc(outputInfo, isSupported);
2839 return Fail(
"%s: AddLogSoftmaxLayer() returned nullptr", __func__);
2849 VLOG(DRIVER) <<
"Converter::ConvertLstm()";
2857 return Fail(
"%s: Could not read input 0: input", __func__);
2863 return Fail(
"%s: Could not read input 18: outputStateIn", __func__);
2869 return Fail(
"%s: Could not read input 19: cellStateIn", __func__);
2907 if (!inputToForgetWeightsPin.
IsValid() ||
2908 !inputToCellWeightsPin.
IsValid() ||
2909 !inputToOutputWeightsPin.
IsValid() ||
2910 !recurrentToForgetWeightsPin.
IsValid() ||
2911 !recurrentToCellWeightsPin.
IsValid() ||
2912 !recurrentToOutputWeightsPin.
IsValid() ||
2913 !forgetGateBiasPin.
IsValid() ||
2917 return Fail(
"%s: Operation has invalid tensor inputs", __func__);
2963 if ((!inputToInputWeightsPin.
IsValid() && !inputToInputWeightsPin.
IsOptional()) ||
2964 (!recurrentToInputWeightsPin.
IsValid() && !recurrentToInputWeightsPin.
IsOptional()) ||
2972 return Fail(
"%s: Operation has invalid tensor inputs", __func__);
2982 ActivationFn activation = ActivationFn::kActivationNone;
2986 !
GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
2987 !
GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
2989 return Fail(
"%s: Operation has invalid scalar inputs", __func__);
3037 return Fail(
"%s: Could not read output 0: scratchBuffer", __func__);
3041 if (!outputStateOut)
3043 return Fail(
"%s: Could not read output 1: outputStateOut", __func__);
3049 return Fail(
"%s: Could not read output 2: cellStateOut", __func__);
3056 return Fail(
"%s: Could not read output 3: output", __func__);
3105 return Fail(
"%s: All, or none, of input-to-input weights, recurrent-to-input weights," 3106 " and input gate bias must be provided", __func__);
3111 return Fail(
"%s: projection bias should not be provided without projection weights", __func__);
3119 return Fail(
"%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided" 3120 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
3129 return Fail(
"%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be" 3130 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
3195 bool isSupported =
false;
3213 bool isDynamic =
false;
3219 validateFunc(outputInfo, isSupported);
3254 operation, 3, *layer, 3, model, data,
nullptr, validateFunc, ActivationFn::kActivationNone,
true));
3261 VLOG(DRIVER) <<
"Converter::ConvertMaxPool2d()";
3262 return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
3267 VLOG(DRIVER) <<
"Converter::ConvertMaximum()";
3274 return Fail(
"%s: Operation has invalid inputs", __func__);
3280 return Fail(
"%s: Could not read output", __func__);
3285 bool isSupported =
false;
3303 validateFunc(outInfo, isSupported);
3312 assert(layer !=
nullptr);
3313 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3314 if (!isReshapeSupported)
3324 VLOG(DRIVER) <<
"Converter::ConvertMean()";
3329 return Fail(
"%s: Operation has invalid inputs", __func__);
3335 return Fail(
"%s: Could not read output 0", __func__);
3343 return Fail(
"%s: Could not read input 1", __func__);
3346 std::vector<int32_t> axis;
3349 return Fail(
"%s: Input 1 has invalid values", __func__);
3356 std::set<unsigned int> uniqueAxis;
3357 std::transform(axis.begin(), axis.end(),
3358 std::inserter(uniqueAxis, uniqueAxis.begin()),
3359 [rank](
int i) ->
unsigned int {
return (i + rank) % rank; });
3362 int32_t keepDims = 0;
3365 return Fail(
"%s: Could not read input 2", __func__);
3369 descriptor.
m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3372 bool isSupported =
false;
3386 validateFunc(outputInfo, isSupported);
3399 assert(layer !=
nullptr);
3407 VLOG(DRIVER) <<
"Converter::ConvertMinimum()";
3414 return Fail(
"%s: Operation has invalid inputs", __func__);
3420 return Fail(
"%s: Could not read output 0", __func__);
3425 bool isSupported =
false;
3443 validateFunc(outputInfo, isSupported);
3452 assert(layer !=
nullptr);
3453 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3454 if (!isReshapeSupported)
3464 VLOG(DRIVER) <<
"Converter::ConvertMul()";
3471 return Fail(
"%s: Operation has invalid inputs", __func__);
3476 ActivationFn activationFunction;
3479 return Fail(
"%s: Operation has invalid inputs", __func__);
3484 if (outputOperand ==
nullptr)
3491 bool isSupported =
false;
3505 validateFunc(outputInfo, isSupported);
3519 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3520 if (!isReshapeSupported)
3526 data,
nullptr, validateFunc, activationFunction);
3531 VLOG(DRIVER) <<
"Converter::ConvertPad()";
3536 return Fail(
"%s: Operation has invalid inputs", __func__);
3545 return Fail(
"%s: Could not convert paddings", __func__);
3561 return Fail(
"%s: Could not read output", __func__);
3566 bool isSupported =
false;
3580 validateFunc(outputInfo, isSupported);
3593 assert(layer !=
nullptr);
3601 VLOG(DRIVER) <<
"Converter::ConvertPadV2()";
3606 return Fail(
"%s: Could not read input 0", __func__);
3612 return Fail(
"%s: Could not read output", __func__);
3621 return Fail(
"%s: Could not convert paddings", __func__);
3633 return Fail(
"%s: Operation has invalid inputs", __func__);
3637 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
3640 if (!
GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
3642 return Fail(
"%s: Could not read input 2 (FLOAT16)", __func__);
3647 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
3651 return Fail(
"%s: Could not read input 2 (FLOAT32)", __func__);
3656 int32_t intPadValue = 0;
3659 return Fail(
"%s: Could not read input 2 (INT32)", __func__);
3665 return Fail(
"%s: Operation has invalid inputs: type mismatch", __func__);
3668 bool isSupported =
false;
3686 validateFunc(outputInfo, isSupported);
3695 assert(layer !=
nullptr);
3703 VLOG(DRIVER) <<
"Converter::ConvertPrelu()";
3710 return Fail(
"%s: Operation has invalid inputs", __func__);
3717 return Fail(
"%s: Could not read output", __func__);
3724 bool isSupported =
false;
3742 validateFunc(outputInfo, isSupported);
3754 return Fail(
"%s: AddPreluLayer failed", __func__);
3757 bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
3758 if (!isReshapeSupported)
3768 VLOG(DRIVER) <<
"Converter::ConvertQuantize()";
3773 return Fail(
"%s: Operation has invalid input", __func__);
3779 return Fail(
"%s: Operation has invalid outputs", __func__);
3784 bool isSupported =
false;
3788 IsQuantizeSupported,
3801 validateFunc(outputInfo, isSupported);
3810 assert(layer !=
nullptr);
3818 VLOG(DRIVER) <<
"Converter::ConvertQuantizedLstm()";
3820 VLOG(DRIVER) <<
"ConvertQuantizedLstm()";
3828 return Fail(
"%s: Could not read input 0: input", __func__);
3833 if (!outputStatePrevTimeStep.
IsValid())
3835 return Fail(
"%s: Could not read input 18: outputStatePrevTimeStep", __func__);
3840 if (!cellStatePrevTimeStep.
IsValid())
3842 return Fail(
"%s: Could not read input 19: cellStatePrevTimeStep", __func__);
3889 if (!inputToForgetWeightsPin.
IsValid() ||
3890 !inputToCellWeightsPin.
IsValid() ||
3891 !inputToOutputWeightsPin.
IsValid() ||
3892 !recurrentToForgetWeightsPin.
IsValid() ||
3893 !recurrentToCellWeightsPin.
IsValid() ||
3894 !recurrentToOutputWeightsPin.
IsValid() ||
3895 !forgetGateBiasPin.
IsValid() ||
3899 return Fail(
"%s: Operation has invalid tensor inputs", __func__);
3992 || (!recurrentToInputWeightsPin.
IsValid() && !recurrentToInputWeightsPin.
IsOptional())
4000 return Fail(
"%s: Operation has invalid tensor inputs", __func__);
4050 if ((!inputLayerNormWeightsPin.
IsValid() && !inputLayerNormWeightsPin.
IsOptional())
4051 || (!forgetLayerNormWeightsPin.
IsValid() && !forgetLayerNormWeightsPin.
IsOptional())
4052 || (!cellLayerNormWeightsPin.
IsValid() && !cellLayerNormWeightsPin.
IsOptional())
4053 || (!outputLayerNormWeightsPin.
IsValid() && !outputLayerNormWeightsPin.
IsOptional()))
4055 return Fail(
"%s: Operation has invalid tensor inputs", __func__);
4069 float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
4070 int projInputZeroPoint;
4072 if (!
GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data,
true) ||
4073 !
GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data,
true) ||
4074 !
GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
4075 !
GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
4076 !
GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
4077 !
GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
4078 !
GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
4079 !
GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
4081 return Fail(
"%s: Operation has invalid scalar inputs", __func__);
4088 if (!outputStateOut)
4090 return Fail(
"%s: Could not read output 0: outputStateOut", __func__);
4097 return Fail(
"%s: Could not read output 1: cellStateOut", __func__);
4105 return Fail(
"%s: Could not read output 2: output", __func__);
4159 return Fail(
"%s: All, or none, of input-to-input weights, recurrent-to-input weights," 4160 " and input gate bias must be provided", __func__);
4165 return Fail(
"%s: projection bias should not be provided without projection weights", __func__);
4173 return Fail(
"%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided" 4174 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
4183 return Fail(
"%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be" 4184 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
4240 const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
4241 const TensorInfo constOutputInfo(outputInfo);
4261 bool isSupported =
false;
4262 auto validateFunc = [&](
const armnn::TensorInfo& cellStateOutInfo,
bool& isSupported)
4269 outputStatePrevTimeStepInfo,
4270 cellStatePrevTimeStepInfo,
4271 constOutputStateOutInfo,
4278 bool isDynamic =
false;
4283 validateFunc(outputInfo, isSupported);
4306 operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4313 operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4315 operation, 1, *layer, 1, model, data,
nullptr, validateFunc,
4316 ActivationFn::kActivationNone,
true) &&
4323 VLOG(DRIVER) <<
"Converter::ConvertQuantized16BitLstm()";
4324 VLOG(DRIVER) <<
"Policy::ConvertQuantized16BitLstm()";
4332 return Fail(
"%s: Could not read input 0: input", __func__);
4339 if (!previousCellStateIn.
IsValid())
4341 return Fail(
"%s: Could not read input 13: previousCellStateIn", __func__);
4348 if (!previousOutputIn.
IsValid())
4350 return Fail(
"%s: Could not read input 14: previousOutputIn", __func__);
4426 if (!inputToInputWeightsPin.
IsValid() ||
4427 !inputToForgetWeightsPin.
IsValid() ||
4428 !inputToCellWeightsPin.
IsValid() ||
4429 !inputToOutputWeightsPin.
IsValid() ||
4430 !recurrentToInputWeightsPin.
IsValid() ||
4431 !recurrentToForgetWeightsPin.
IsValid() ||
4432 !recurrentToCellWeightsPin.
IsValid() ||
4433 !recurrentToOutputWeightsPin.
IsValid() ||
4434 !inputGateBiasPin.
IsValid() ||
4435 !forgetGateBiasPin.
IsValid() ||
4439 return Fail(
"%s: Operation has invalid tensor inputs", __func__);
4449 return Fail(
"%s: Could not read output 0: cellStateOut", __func__);
4457 return Fail(
"%s: Could not read output 1: output", __func__);
4472 return Fail(
"%s: Dynamic output tensors are not supported", __func__);
4504 bool isSupported =
false;
4512 previousCellStateInInfo,
4513 previousOutputInInfo,
4519 bool isDynamic =
false;
4523 validateFunc(outputInfo, isSupported);
4550 operation, 1, *layer, 1, model, data,
nullptr, validateFunc, ActivationFn::kActivationNone,
true));
4557 VLOG(DRIVER) <<
"Converter::ConvertRank()";
4562 if (inputOperand ==
nullptr || outputOperand ==
nullptr)
4564 return Fail(
"%s: Operation has invalid inputs", __func__);
4567 const Shape inputOperandShape = GetOperandShape(*inputOperand);
4568 const Shape outputOperandShape = GetOperandShape(*outputOperand);
4573 return Fail(
"%s: Could not read input 0", __func__);
4579 return Fail(
"%s: Dynamic output tensors are not supported", __func__);
4582 bool isSupported =
false;
4595 assert(layer !=
nullptr);
4603 VLOG(DRIVER) <<
"Converter::ConvertReLu()";
4611 return Fail(
"%s: Input 0 is invalid",
"operationName");
4622 bool isSupported =
false;
4641 validateFunc(outInfo, isSupported);
4658 VLOG(DRIVER) <<
"Converter::ConvertReLu1()";
4669 VLOG(DRIVER) <<
"Converter::ConvertReLu6()";
4679 VLOG(DRIVER) <<
"Converter::ConvertReshape()";
4685 if (inputOperand ==
nullptr 4686 || requestedShapeOperand ==
nullptr 4687 || outputOperand ==
nullptr)
4689 return Fail(
"%s: Operation has invalid inputs", __func__);
4692 if (requestedShapeOperand->dimensions.size() != 1)
4694 return Fail(
"%s: Input 1 expected to be one-dimensional (found %i dimensions)",
4695 __func__, requestedShapeOperand->dimensions.size());
4698 std::vector<int32_t> targetDimensions;
4701 return Fail(
"%s: Could not read values of input 1", __func__);
4704 const Shape inputOperandShape = GetOperandShape(*inputOperand);
4706 Shape requestedShape;
4709 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
4711 return Fail(
"%s: Failed to resolve the requested shape", __func__);
4717 return Fail(
"%s: Could not read input 0", __func__);
4722 requestedShape.dimensions.data());
4726 bool isSupported =
false;
4740 validateFunc(outputInfo, isSupported);
4753 assert(layer !=
nullptr);
4759 bool Converter::ConvertResize(
const Operation& operation,
4764 VLOG(DRIVER) <<
"Converter::ConvertResize()";
4770 return Fail(
"%s: Could not read input 0", __func__);
4776 return Fail(
"%s: Could not read output 0", __func__);
4783 descriptor.
m_Method = resizeMethod;
4792 return Fail(
"%s: Operation has invalid inputs", __func__);
4795 if (operandType1 != operandType2)
4797 return Fail(
"%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
4800 if (operandType1 == OperandType::INT32)
4803 int32_t targetWidth = 0;
4804 int32_t targetHeight = 0;
4806 if (!
GetInputInt32(operation, 1, targetWidth, model, data) ||
4809 return Fail(
"%s: Operation has invalid inputs for resizing by shape", __func__);
4812 if (targetWidth < 0 || targetHeight < 0)
4814 return Fail(
"%s: Operation has invalid inputs for resizing by shape. " 4815 "Target width/height cannot be < 0", __func__);
4818 descriptor.
m_TargetWidth =
static_cast<uint32_t
>(targetWidth);
4821 else if (operandType1 == OperandType::FLOAT32)
4824 float widthScale = 1.0f;
4825 float heightScale = 1.0f;
4830 return Fail(
"%s: Operation has invalid inputs for resizing by scale", __func__);
4836 float width = inputShape[dataLayoutIndexed.
GetWidthIndex()];
4842 else if (operandType1 == OperandType::FLOAT16)
4847 if (!
GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
4848 !
GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
4850 return Fail(
"%s: Operation has invalid inputs for resizing by scale", __func__);
4864 return Fail(
"%s: Operand has invalid data type for resizing by scale", __func__);
4870 bool isSupported =
false;
4888 validateFunc(outputInfo, isSupported);
4897 assert(layer !=
nullptr);
4905 VLOG(DRIVER) <<
"Converter::ConvertSpaceToBatchNd()";
4910 return Fail(
"%s: Operation has invalid inputs", __func__);
4915 unsigned int spatialDim = rank - 2;
4919 Fail(
"%s: Only inputs with rank 4 are supported", __func__);
4925 return Fail(
"%s: Could not read output 0", __func__);
4933 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4936 return Fail(
"%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4939 std::vector<int32_t> blockShape;
4942 return Fail(
"%s: Operation has an invalid or unsupported block size operand", __func__);
4944 if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
4947 return Fail(
"%s: Block shape must be at least 1 in all dimensions.", __func__);
4950 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4953 return Fail(
"%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4956 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4957 std::vector<int32_t> paddings;
4960 return Fail(
"%s: Operation has an invalid or unsupported paddings operand", __func__);
4962 for (
unsigned int i = 0; i < paddings.size() - 1; i += 2)
4964 int paddingBeforeInput = paddings[i];
4965 int paddingAfterInput = paddings[i + 1];
4966 if(paddingBeforeInput < 0 || paddingAfterInput < 0)
4968 return Fail(
"%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4971 paddingList.emplace_back((
unsigned int) paddingBeforeInput, (
unsigned int) paddingAfterInput);
4976 descriptor.
m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4977 descriptor.
m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4979 if(Is12OrLaterOperand(*output))
4984 bool isSupported =
false;
5001 validateFunc(outputInfo, isSupported);
5010 assert(layer !=
nullptr);
5018 VLOG(DRIVER) <<
"Converter::ConvertSpaceToDepth()";
5023 return Fail(
"%s: Operation has invalid inputs", __func__);
5030 return Fail(
"%s: Only inputs with rank 4 are supported", __func__);
5036 return Fail(
"%s: Could not read output 0", __func__);
5047 return Fail(
"%s: Block size must be at least 1 in all dimensions");
5052 bool isSupported =
false;
5070 validateFunc(outputInfo, isSupported);
5079 assert(layer !=
nullptr);
5087 VLOG(DRIVER) <<
"Converter::ConvertSoftmax()";
5092 return Fail(
"%s: Operation has invalid inputs", __func__);
5098 return Fail(
"%s: Operation has no outputs", __func__);
5107 if (outputType == OperandType::TENSOR_FLOAT16)
5111 if (!
GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
5113 return Fail(
"%s: Operation has invalid inputs %d", __func__, outputType);
5116 desc.
m_Beta =
static_cast<float>(value);
5122 return Fail(
"%s: Operation has invalid inputs %d", __func__, outputType);
5133 return Fail(
"%s: Operation has invalid inputs", __func__);
5136 bool isSupported =
false;
5154 validateFunc(outputInfo, isSupported);
5163 assert(layer !=
nullptr);
5171 VLOG(DRIVER) <<
"Converter::ConvertSub()";
5178 return Fail(
"%s: Operation has invalid inputs", __func__);
5183 ActivationFn activationFunction;
5186 return Fail(
"%s: Operation has invalid inputs", __func__);
5192 return Fail(
"%s: Could not read output 0", __func__);
5197 bool isSupported =
false;
5215 validateFunc(outputInfo, isSupported);
5225 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
5226 if (!isReshapeSupported)
5231 data,
nullptr, validateFunc, activationFunction);
5236 VLOG(DRIVER) <<
"Converter::ConvertTanH()";
5248 VLOG(DRIVER) <<
"Converter::ConvertTransposeConv2d()";
5254 return Fail(
"%s: Operation has invalid inputs", __func__);
5261 return Fail(
"%s: Could not read output 0", __func__);
5271 if (weightsOperand ==
nullptr)
5273 return Fail(
"%s: Operand is invalid", __func__);
5279 bool implicitPadding = operation.inputs.size() == 9;
5281 if (implicitPadding )
5291 unsigned int widthIndex = dataLayoutIndexed.
GetWidthIndex();
5300 model, data, OHWIToOIHW) :
5309 return Fail(
"%s: Operation has invalid weights", __func__);
5314 return Fail(
"%s: Operation has invalid biases", __func__);
5319 SanitizeBiasQuantizationScale(bias.
GetInfo(), weights.
GetInfo(), inputInfo);
5321 ActivationFn activation;
5323 if (implicitPadding)
5328 int32_t padRight{0};
5330 int32_t padBottom{0};
5332 ::android::nn::PaddingScheme paddingScheme;
5334 !
GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
5335 !
GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
5338 return Fail(
"%s: Operation has invalid inputs (implicit padding)", __func__);
5341 const uint32_t kernelX = weights.
GetShape()[widthIndex];
5342 const uint32_t kernelY = weights.
GetShape()[heightIndex];
5346 std::vector<int32_t> outputShape;
5347 if ((outputShapeOperand) && (
GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
5350 for (
int dimension : outputShape)
5352 desc.
m_OutputShape.push_back(static_cast<unsigned int>(dimension));
5362 if (outputShape.size() == 0)
5364 return Fail(
"%s: Padding sizes cannot be inferred", __func__);
5367 outputX = outputShape[widthIndex];
5368 outputY = outputShape[heightIndex];
5372 outputX = outputInfo.
GetShape()[widthIndex];
5373 outputY = outputInfo.
GetShape()[heightIndex];
5376 CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
5377 CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
5381 if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
5383 return Fail(
"%s: Negative padding values are not supported", __func__);
5393 else if (operation.inputs.size() == 11)
5404 return Fail(
"%s: Operation has invalid inputs (explicit padding)", __func__);
5409 return Fail(
"%s: Unsupported number of operation inputs", __func__);
5415 bool isSupported =
false;
5435 validateFunc(outputInfo, isSupported);
5446 return Fail(
"%s: AddTransposeConvolution2dLayer failed", __func__);
5452 data,
nullptr, validateFunc, activation);
5457 VLOG(DRIVER) <<
"Converter::ConvertSqrt()";
5466 VLOG(DRIVER) <<
"Converter::ConvertSqueeze()";
5471 return Fail(
"%s: Operation has invalid inputs", __func__);
5478 Fail(
"%s: Inputs with rank greater than 4 are not supported", __func__);
5484 return Fail(
"%s: Could not read output 0", __func__);
5489 return Fail(
"%s: Dynamic output tensors are not supported", __func__);
5496 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
5498 std::vector<int32_t> axis;
5501 axis.assign(dimensionSequence,
5502 dimensionSequence + rank);
5506 return Fail(
"%s: Operation has an invalid or unsupported axis operand", __func__);
5509 std::vector<uint32_t> outputDims;
5510 for (
unsigned int i = 0; i < rank; i++)
5512 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
5513 auto currentDimension = inputInfo.
GetShape()[i];
5514 if (skipSqueeze || currentDimension != 1)
5516 outputDims.push_back(currentDimension);
5528 bool isSupported =
false;
5543 assert(layer !=
nullptr);
5551 VLOG(DRIVER) <<
"Converter::ConvertStridedSlice()";
5556 return Fail(
"%s: Operation has invalid inputs", __func__);
5563 Fail(
"%s: Inputs with rank greater than 4 are not supported", __func__);
5569 return Fail(
"%s: Could not read output 0", __func__);
5578 std::vector<int32_t> beginValues;
5579 std::vector<int32_t> endValues;
5580 std::vector<int32_t> stridesValues;
5583 auto ValidateInputOperands = [&] (
const Operand& operand, std::vector<int32_t>& operandValues)
5590 if (operandValues.size() != rank)
5598 if (!ValidateInputOperands(*beginOperand, beginValues)
5599 || !ValidateInputOperands(*endOperand, endValues)
5600 || !ValidateInputOperands(*stridesOperand, stridesValues))
5602 return Fail(
"%s: Operation has invalid input operand", __func__);
5606 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){
return i == 0; }))
5608 return Fail(
"%s: Stride must be non-zero value.", __func__);
5612 descriptor.
m_Begin.assign(beginValues.cbegin(), beginValues.cend());
5613 descriptor.
m_End.assign(endValues.cbegin(), endValues.cend());
5614 descriptor.
m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
5622 return Fail(
"%s: Operation has invalid inputs", __func__);
5625 bool isSupported =
false;
5643 validateFunc(outputInfo, isSupported);
5655 int stride = descriptor.
m_Stride[i];
5661 if (((descriptor.
m_Begin[i] - descriptor.
m_End[i]) > 1)
5662 || ((descriptor.
m_Begin[i] - descriptor.
m_End[i]) < -1))
5664 return Fail(
"%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
5669 return Fail(
"%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
5675 assert(layer !=
nullptr);
5683 VLOG(DRIVER) <<
"Converter::ConvertTranspose()";
5688 return Fail(
"%s: Operation has invalid inputs", __func__);
5695 Fail(
"%s: Inputs with rank greater than 4 are not supported", __func__);
5702 std::vector<int32_t> perm(rank);
5703 if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
5705 for (
unsigned int i = rank; i > 0; i--)
5712 return Fail(
"%s: Operation has an invalid or unsupported permutation operand", __func__);
5715 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
5723 return Fail(
"%s: Could not read output 0", __func__);
5728 bool isSupported =
false;
5732 IsTransposeSupported,
5746 validateFunc(outputInfo, isSupported);
5755 assert(layer !=
nullptr);
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified...
const Operand * GetOutputOperand(const Operation &operation, uint32_t outputIndex, const Model &model)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
bool m_BiasEnabled
Enable/disable bias.
bool IsQuantizedLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo ¶msInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
unsigned int GetWidthIndex() const
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
const TensorShape & GetShape() const
uint32_t m_PadBottom
Padding bottom value in the height dimension.
::android::nn::Operand Operand
float m_ClippingThresProj
Clipping threshold value for the projection.
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
constexpr const char * GetResizeMethodAsCString(ResizeMethod method)
const std::vector< armnn::BackendId > m_Backends
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo ¶msInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool GetInputActivationFunction(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
bool HasPerAxisQuantization() const
bool m_PeepholeEnabled
Enable/disable peephole.
bool ConvertReduce(const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
static bool ConvertOperation(const Operation &operation, const Model &model, ConversionData &data)
uint32_t m_PadLeft
Padding left value in the width dimension.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
std::vector< unsigned int > m_OutputShape
Optional< unsigned int > GetQuantizationDim() const
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool GetOptionalBool(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
const TensorShape & GetShape() const
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
float m_Beta
Exponentiation value.
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const armnn::ConstTensor * GetConstTensorPtr() const
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
bool m_OutputShapeEnabled
Output shape if it has been specified.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
MemoryType GetMemoryArea() const
bool GetOperandType(const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
bool GetOptionalInputActivation(const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
std::vector< float > GetQuantizationScales() const
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
unsigned int GetHeightIndex() const
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
void SetShape(const TensorShape &newShape)
const armnn::PermutationVector g_DontPermute
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const Operand * GetInputOperand(const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
constexpr char const * GetUnaryOperationAsCString(UnaryOperation operation)
TensorShape m_TargetShape
Target shape value.
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
constexpr char const * GetLogicalBinaryOperationAsCString(LogicalBinaryOperation operation)
bool AreDynamicTensorsSupported()
Checks for ArmNN support of dynamic tensors.
uint32_t m_PadTop
Padding top value in the height dimension.
A PadDescriptor for the PadLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
constexpr char const * GetArgMinMaxFunctionAsCString(ArgMinMaxFunction function)
bool ConvertPooling2d(const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
bool m_LayerNormEnabled
Enable/disable layer normalization.
An LstmDescriptor for the LstmLayer.
bool GetInputScalar(const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
armnn::INetworkPtr m_Network
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
armnn::DataLayout OptionalDataLayout(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
An ArgMinMaxDescriptor for ArgMinMaxLayer.
float GetQuantizationScale() const
DataType GetDataType() const
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
An OriginsDescriptor for the ConcatLayer.
float m_ProjectionClip
Clipping threshold value for the projection.
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool has_value() const noexcept
A FullyConnectedDescriptor for the FullyConnectedLayer.
armnn::TensorInfo GetTensorInfoForOperand(const Operand &operand)
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
float m_InputIntermediateScale
Input intermediate quantization scale.
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
bool ConvertPaddings(const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
bool m_HalfPixelCenters
Half Pixel Centers.
uint32_t m_PadTop
Padding top value in the height dimension.
void SetQuantizationScale(float scale)
#define ARMNN_ASSERT(COND)
A QLstmDescriptor for the QLstmLayer.
armnn::TensorShape ExpandDims(const armnn::TensorShape &tensorShape, int axis)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
const TensorInfo & GetInfo() const
min(a, max(b, input)) ReLu1 & ReLu6.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
A BatchMatMulDescriptor for the BatchMatMul operator.
bool GetTensorInt32Values(const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
bool GetInputActivationFunctionFromTensor(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool IsDynamicTensor(const armnn::TensorInfo &tensorInfo)
Checks if a tensor info represents a dynamic tensor.
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
::android::nn::OperandType OperandType
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0. ...
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
bool IsWeightsValid(const Operation &operation, uint32_t inputIndex, const Model &model)
Utility functions.
::android::nn::Operation Operation
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
float m_CellClip
Clipping threshold value for the cell state.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
uint32_t m_DilationX
Dilation along x axis.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
LayerInputHandle ConvertToLayerInputHandle(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
uint32_t m_PadLeft
Padding left value in the width dimension.
bool SetupAndTrackLayerOutputSlot(const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
bool GetInputPaddingScheme(const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
bool ConvertToActivation(const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported,...)
bool GetInputInt32(const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
bool m_ProjectionEnabled
Enable/disable the projection layer.
const armnn::ConstTensor & GetConstTensor() const
bool IsConnectedToDequantize(armnn::IOutputSlot *ioutputSlot)
OriginsDescriptor ConcatDescriptor
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
virtual bool IsTensorInfoSet() const =0
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_LayerNormEnabled
Enable/disable layer normalization.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
uint32_t m_PadRight
Padding right value in the width dimension.
bool IsOperandConstant(const Operand &operand)
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const TensorInfo & GetTensorInfo() const =0
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
int m_Axis
Axis to reduce across the input tensor.
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
void SetQuantizationOffset(int32_t offset)
bool isQuantizedOperand(const OperandType &operandType)
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
virtual int Connect(IInputSlot &destination)=0
Krichevsky 2012: Local Brightness Normalization.
bool GetOptionalConvolutionDilationParams(const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
A NormalizationDescriptor for the NormalizationLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
::android::nn::Model Model
void SetQuantizationScales(const std::vector< float > &scales)
float m_CellIntermediateScale
Cell intermediate quantization scale.
unsigned int GetNumDimensions() const
unsigned int GetChannelsIndex() const
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
bool GetInputFloat32(const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
A FillDescriptor for the FillLayer.
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
uint32_t m_PadLeft
Padding left value in the width dimension.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
constexpr unsigned int GetDataTypeSize(DataType dataType)
uint32_t m_PadRight
Padding right value in the width dimension.
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
bool m_ConstantWeights
Enable/disable constant weights and biases.