19 #include <ActivationFunctor.h>
20 #include <CpuExecutor.h>
21 #include <OperationsUtils.h>
37 #include <nnapi/OperandTypes.h>
38 #include <nnapi/Result.h>
39 #include <nnapi/TypeUtils.h>
40 #include <nnapi/Types.h>
41 #include <nnapi/Validation.h>
119 std::vector<uint8_t> m_SwizzledTensorData;
// Logs a printf-style failure message via ALOGD so call sites can write
// `return Fail("...", args...);`.
// NOTE(review): this extraction is missing interior lines (braces and the
// final return statement — presumably `return false;`, confirm against the
// complete file before editing).
144 template<
class... Args>
145 static bool Fail(
const char* formatStr, Args&&... args)
147 ALOGD(formatStr, std::forward<Args>(args)...);
// Queries every backend in `backends` for support of a layer: fetches the
// ILayerSupport object via armnn::GetILayerSupportByBackendId, invokes the
// given support-check member `func` with __VA_ARGS__ plus an Optional
// string for the rejection reason, records the first supporting backend in
// `setBackend`, and VLOGs why unsupported backends rejected the layer.
// An armnn::InvalidArgumentException from the check is rethrown with added
// context ("Failed to check layer support") and source location.
// NOTE(review): several macro lines are missing from this extraction
// (loop/branch braces and — presumably — the assignment of the check result
// to `supported`; confirm against the full file). Comments are kept outside
// the macro: a '//' line between backslash-continued lines would splice
// away the following macro line during preprocessing.
153 #define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
156 for (auto&& backendId : backends) \
158 auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
159 if (layerSupportObject.IsBackendRegistered()) \
161 std::string reasonIfUnsupported; \
163 layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
166 setBackend = backendId; \
171 if (reasonIfUnsupported.size() > 0) \
173 VLOG(DRIVER) << funcName << ": not supported by armnn: " << reasonIfUnsupported.c_str(); \
177 VLOG(DRIVER) << funcName << ": not supported by armnn"; \
183 VLOG(DRIVER) << funcName << ": backend not registered: " << backendId.Get().c_str(); \
188 VLOG(DRIVER) << funcName << ": not supported by any specified backend"; \
191 catch (const armnn::InvalidArgumentException &e) \
193 throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
202 inline bool IsOperandTypeSupportedForTensors(
OperandType type)
204 return type == OperandType::BOOL ||
205 type == OperandType::TENSOR_BOOL8 ||
206 type == OperandType::TENSOR_FLOAT16 ||
207 type == OperandType::TENSOR_FLOAT32 ||
208 type == OperandType::TENSOR_QUANT8_ASYMM ||
209 type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
210 type == OperandType::TENSOR_QUANT8_SYMM ||
211 type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
212 type == OperandType::TENSOR_QUANT16_SYMM ||
213 type == OperandType::TENSOR_INT32;
216 inline bool IsBool(
Operand operand)
218 return operand.type == OperandType::BOOL;
// Predicate used when handling NN API level 1.2-or-later operands; the
// Operand parameter is intentionally unnamed (unused in the body).
// NOTE(review): the function body is missing from this extraction —
// presumably a constant return; confirm against the complete file.
221 inline bool Is12OrLaterOperand(
Operand)
// Templated helper taking a layer handle by reference; per the visible
// fragment it creates a reshape layer and returns a reference to it.
// NOTE(review): the signature tail, body, and reshapeLayer creation are
// missing from this extraction — do not edit without the complete file.
227 template<
typename LayerHandleType>
229 LayerHandleType& inputLayer,
242 return *reshapeLayer;
// Fragment of a fully-connected input-flattening helper: derives the batch
// size from the total element count and weights' input size (weights dim 1),
// throwing when the shape cannot be deduced.
// NOTE(review): the enclosing signature and braces are missing from this
// extraction. Also, the sanity check uses `% batchSize` where `% inputSize`
// would more directly detect a non-divisible element count — verify whether
// this is intended before changing it.
252 unsigned int inputSize = weightsShape[1];
254 unsigned int batchSize = totalInputElements / inputSize;
256 if(totalInputElements % batchSize != 0)
258 throw std::runtime_error(
"Failed to deduce tensor shape");
// Fragment of a fully-connected shape validation: batch dimensions must
// match (inputShape[0] == outputShape[0]) and the output feature count must
// equal the weights extent along dim 0 when the weight matrix is transposed,
// dim 1 otherwise.
// NOTE(review): the start of the signature is missing from this extraction.
272 bool transposeWeightMatrix)
274 unsigned int dimIdx = transposeWeightMatrix ? 0 : 1;
275 return (inputShape[0] == outputShape[0] && weightsShape[dimIdx] == outputShape[1]);
// Fragment of a broadcast helper: when the two inputs have different ranks,
// the lower-rank shape is left-padded with 1s (reshapedDimensions starts as
// all 1s, then the small shape's extents are copied into the trailing
// positions) so both inputs end up with equal rank.
// NOTE(review): large parts of the function (early-return body, reshape
// info construction, the support check using isSupported) are missing from
// this extraction.
291 if (inputDimensions0 == inputDimensions1)
303 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
304 unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
305 armnn::numeric_cast<int>(inputDimensions1));
307 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
312 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
313 for (
unsigned int i = sizeDifference; i < maxInputDimensions; i++)
315 reshapedDimensions[i] = smallShape[i - sizeDifference];
320 reshapedDimensions.data() });
325 bool isSupported =
false;
// CalcPadding (non-dilated overload): converts an NNAPI implicit padding
// scheme into explicit head/tail padding via calculateExplicitPadding, then
// narrows the int32 results to uint32 with armnn::numeric_cast.
// NOTE(review): the kernel/stride parameters and the local padHead/padTail
// declarations are missing from this extraction.
374 void CalcPadding(uint32_t input,
377 uint32_t& outPadHead,
378 uint32_t& outPadTail,
379 PaddingScheme scheme)
383 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
384 outPadHead = armnn::numeric_cast<uint32_t>(padHead);
385 outPadTail = armnn::numeric_cast<uint32_t>(padTail);
388 void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
389 uint32_t& outPadTail, ::android::nn::PaddingScheme scheme)
393 calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
394 outPadHead = armnn::numeric_cast<uint32_t>(padHead);
395 outPadTail = armnn::numeric_cast<uint32_t>(padTail);
398 inline void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
399 int32_t& outPadTail, ::android::nn::PaddingScheme scheme)
401 calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
// Fragment: copies an Operand's dimensions and quantization parameters
// (scale, zeroPoint-as-offset) into a Shape value.
// NOTE(review): the enclosing function signature, the Shape declaration and
// the return are missing from this extraction.
408 shape.dimensions = operand.dimensions;
409 shape.scale = operand.scale;
410 shape.offset = operand.zeroPoint;
// Fragment of a bias-quantization sanitizer: rewrites the bias scale(s) in
// place (per-axis via std::transform over biasScales, or the single-scale
// path) and logs that the parameters were adjusted. Per the log text, the
// corrected scale matches input scale * weights scale — the lambda body
// computing it is missing from this extraction; confirm before relying on
// the exact formula.
427 auto UpdateBiasScaleValue = [&inputInfo](
float biasScale) ->
float
433 std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
439 VLOG(DRIVER) <<
"Bias quantization params have been updated for per-axis quantization";
448 VLOG(DRIVER) <<
"Bias quantization scale has been modified to match input * weights";
// Template header for a helper parameterized on an output-slot type.
// NOTE(review): the declaration and body this header belongs to are
// entirely missing from this extraction.
464 template<
typename OSlot>
// Validates a concatenation's declared output shape: the output must have
// the same rank as the inputs; along concatDim its extent must equal the
// sum of the inputs' extents; along every other dimension it must match the
// first input. Returns via Fail(...) on any mismatch.
// NOTE(review): braces, the outputShape/concatDim parameters, and some
// Fail(...) argument lines are missing from this extraction.
483 bool ValidateConcatOutputShape(
const std::vector<armnn::TensorShape> & inputShapes,
488 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
491 return Fail(
"%s: Output shape has wrong number of dimensions", __func__);
494 unsigned int outputSizeAlongConcatenatedDimension = 0;
495 for (
unsigned int i = 0; i < inputShapes.size(); i++)
497 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
500 for (
unsigned int i = 0; i < numDimensions; ++i)
504 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
507 "%s: Invalid output shape for dimension %d (%d != %d)",
511 outputSizeAlongConcatenatedDimension);
516 if (outputShape[i] != inputShapes[0][i])
518 return Fail(
"%s: Invalid output shape", __func__);
// Fragment of SwizzleInputs: when the permutation mapping is not the 4D
// identity, iterates over the inputs and refreshes each cached shape from
// the (presumably permuted — the permutation step itself is missing from
// this extraction) input's tensor info.
532 std::vector<LayerInputHandle>& inputs,
533 std::vector<armnn::TensorShape>& inputShapes,
535 std::vector<armnn::BackendId>& setBackends)
537 if (!mapping.
IsEqual(IdentityPermutation4D))
539 size_t nInputs = inputs.size();
540 for (
size_t i=0; i<nInputs; ++i)
549 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
// Fragment of TransposeInputs: when the mapping is neither the 4D nor the
// 3D identity permutation, checks per input whether a Transpose layer is
// supported (via the IsTransposeSupported query, recording each chosen
// backend in setBackendsVec) and then delegates the actual permutation to
// SwizzleInputs on the network.
// NOTE(review): the signature tail, the FORWARD_LAYER_SUPPORT_FUNC-style
// call sites and several argument lines are missing from this extraction.
555 std::vector<LayerInputHandle>& inputs,
556 std::vector<armnn::TensorShape>& inputShapes,
560 if (!mapping.
IsEqual(IdentityPermutation4D) && !mapping.
IsEqual(IdentityPermutation3D))
562 std::vector<armnn::BackendId> setBackendsVec;
564 size_t nInputs = inputs.size();
565 for (
size_t i=0; i<nInputs; ++i)
572 bool isSupported =
false;
575 IsTransposeSupported,
579 inputs[i].GetTensorInfo(),
582 setBackendsVec.push_back(setBackend);
589 SwizzleInputs(*data.
m_Network, inputs, inputShapes, mapping, setBackendsVec);
// Chooses the permutation pair needed so a concatenation can run on the
// supported axis: 4D concat on dim 2 swaps dims 2 and 3 (applied both ways);
// 3D concat on dim 1 rotates left then right; 3D concat on dim 2 uses the
// identity permutation. Returns whether a permute is needed.
// NOTE(review): the assignments setting needPermute to true, any
// concatDimension updates, braces and the return are missing from this
// extraction.
594 bool CreateConcatPermutationParameters(
const unsigned int numberOfDimensions,
595 int32_t & concatDimension,
596 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
598 bool needPermute =
false;
604 if (numberOfDimensions == 4 && concatDimension == 2)
607 permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
610 else if (numberOfDimensions == 3 && concatDimension == 1)
613 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
618 else if (numberOfDimensions == 3 && concatDimension == 2)
620 permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
// Fragment region: a stray `ActivationFn activation` parameter from an
// earlier (otherwise missing) declaration, followed by GetInputOperand —
// returns a pointer to the model operand feeding input `inputIndex` of the
// operation, logging via Fail (when failOnIndexOutOfBounds is set) for an
// out-of-range index. The failure-path return is missing from this
// extraction (presumably nullptr — confirm).
638 ActivationFn activation,
646 bool failOnIndexOutOfBounds =
true)
648 if (inputIndex >= operation.inputs.size())
650 if (failOnIndexOutOfBounds)
652 Fail(
"%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
659 return &
getMainModel(model).operands[operation.inputs[inputIndex]];
// GetOutputOperand: returns a pointer to the model operand bound to output
// `outputIndex` of the operation, logging via Fail for an out-of-range
// index. Signature start, braces and the failure-path return (presumably
// nullptr — confirm) are missing from this extraction.
663 uint32_t outputIndex,
666 if (outputIndex >= operation.outputs.size())
668 Fail(
"%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
675 return &
getMainModel(model).operands[operation.outputs[outputIndex]];
// Fragment region: tail of a declaration with a defaulted `optional`
// parameter; part of GetOperandType (fails for an invalid input operand,
// otherwise writes the operand's type through the out-parameter); and the
// body of IsOperandConstant — an operand counts as constant when its
// lifetime is CONSTANT_COPY, CONSTANT_REFERENCE, POINTER or NO_VALUE.
681 bool optional =
false);
691 return Fail(
"%s: invalid input operand at index %i", __func__, inputIndex);
694 type = operand->type;
702 return lifetime == OperandLifeTime::CONSTANT_COPY ||
703 lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
704 lifetime == OperandLifeTime::POINTER ||
705 lifetime == OperandLifeTime::NO_VALUE;
// Fragments of const-tensor-pin conversion helpers: two declarations with a
// defaulted `optional` flag, and a failure log emitted when the input
// operand at `inputIndex` cannot be fetched. The surrounding signatures and
// bodies are missing from this extraction.
715 bool optional =
false,
725 bool optional =
false)
730 Fail(
"%s: failed to get input operand: index=%u", __func__, inputIndex);
// GetInputScalar<OutputType>: reads a scalar operand value from the model
// into `outValue`. Unless `optional` is set, it fails when the operand is
// absent, has the wrong OperandType, or its stored location length differs
// from sizeof(OutputType); it also fails when the value address cannot be
// resolved. On success the value is copied out via a static_cast'd read.
// NOTE(review): the operation/inputIndex/type/model/data parameters, the
// address lookup line, braces and `return true;` are missing from this
// extraction.
741 template <
typename OutputType>
745 OutputType& outValue,
748 bool optional =
false)
751 if (!optional && !operand)
753 return Fail(
"%s: invalid input operand at index %i", __func__, inputIndex);
756 if (!optional && operand->type != type)
758 VLOG(DRIVER) << __func__ <<
": unexpected operand type: " << operand->type <<
" should be: " << type;
762 if (!optional && operand->location.length !=
sizeof(OutputType))
764 return Fail(
"%s: incorrect operand location length: %i (should be %i)",
765 __func__, operand->location.length,
sizeof(OutputType));
769 if (!optional && !valueAddress)
771 return Fail(
"%s: failed to get address for operand", __func__);
776 outValue = *(
static_cast<const OutputType*
>(valueAddress));
// Bodies of the typed convenience wrappers GetInputInt32 / GetInputFloat32:
// each forwards to GetInputScalar with the matching OperandType. Their
// signatures are missing from this extraction.
788 return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
797 return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
// Reads the activation-function input of an operation: accepts only INT32
// or TENSOR_INT32 operand types (logging otherwise), fetches the raw int32
// via GetInputScalar, and casts it to ActivationFn. Braces, the remaining
// parameters and `return true;` are missing from this extraction.
803 ActivationFn& outActivationFunction,
807 if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
809 VLOG(DRIVER) << __func__ <<
": unexpected operand type: " << type
810 <<
" should be OperandType::INT32 or OperandType::TENSOR_INT32";
814 int32_t activationFunctionAsInt;
815 if (!
GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
817 return Fail(
"%s: failed to get activation input value", __func__);
819 outActivationFunction =
static_cast<ActivationFn
>(activationFunctionAsInt);
// Fragments of the activation-reading wrappers (scalar and tensor variants
// that forward to the Impl overload) and of GetOptionalInputActivation: when
// the optional input index is past the operation's input list, the
// activation defaults to kActivationNone; otherwise a failed read reports
// invalid inputs. Most surrounding signature/body lines are missing from
// this extraction.
825 ActivationFn& outActivationFunction,
832 outActivationFunction,
839 ActivationFn& outActivationFunction,
847 outActivationFunction,
855 ActivationFn& activationFunction,
859 if (operation.inputs.size() <= inputIndex)
861 activationFunction = ActivationFn::kActivationNone;
867 return Fail(
"%s: Operation has invalid inputs", __func__);
// Fragments: a templated helper that, when the operation carries at least
// two more inputs starting at dilationXIndex, fills the descriptor's
// m_DilationX / m_DilationY; and the tail of GetOptionalBool, which rejects
// non-BOOL operands (via IsBool) and reads the stored value through a
// static_cast to const bool*. Interleaving lines are missing from this
// extraction.
873 template<
typename ConvolutionDescriptor>
875 uint32_t dilationXIndex,
876 ConvolutionDescriptor& descriptor,
881 if (operation.inputs.size() >= dilationXIndex + 2)
886 descriptor.m_DilationX,
892 descriptor.m_DilationY,
911 if (!IsBool(*operand))
922 return *(
static_cast<const bool*
>(valueAddress));
// Declaration fragments: out-parameters of int32-tensor-values and
// padding-scheme readers, followed by the declaration of the output-slot
// setup helper. That helper takes an optional shape-validation callback
// (defaulted to nullptr), a fused activation (defaulting to
// kActivationNone) and an infer-output-shapes flag (defaulting to false).
// The leading parameters of each declaration are missing from this
// extraction.
926 std::vector<int32_t>& outValues,
932 PaddingScheme& outPaddingScheme,
944 uint32_t operationOutputIndex,
946 uint32_t layerOutputIndex,
950 const std::function <
void (
const armnn::TensorInfo&,
bool&)>& validateFunc =
nullptr,
951 const ActivationFn& activationFunction = ActivationFn::kActivationNone,
952 bool inferOutputShapes =
false);
// Trailing fragments: a second output-slot setup overload with the same
// defaulted validation callback and activation; stray `operationName`
// parameters from two otherwise-missing declarations; the body of IsQSymm8
// (true when the operand's type is TENSOR_QUANT8_SYMM); and the tail of a
// declaration with a defaulted `optional` flag. Most surrounding lines are
// missing from this extraction.
961 uint32_t outputIndex,
966 const std::function <
void (
const armnn::TensorInfo&,
bool&)>& validateFunc =
nullptr,
967 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
981 const char* operationName,
997 const char* operationName,
1004 return operand.type == OperandType::TENSOR_QUANT8_SYMM;
1024 size_t operandIndex,
1025 bool optional =
false);