//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "Utils.hpp"

#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"

// NOTE(review): the targets of the three includes below were lost in text
// extraction (everything after '#include' is missing). Presumably they were
// <DataLayoutIndexed.hpp>, <Half.hpp> and <cmath> — confirm against the
// upstream file before building.
#include
#include
#include

// NOTE(review): throughout this file every angle-bracketed template argument
// list appears to have been stripped by extraction (e.g. 'static_cast(x)',
// 'GetInputScalar(...)', 'armnn::Optional biases'). The tokens are preserved
// byte-for-byte here; the template arguments must be restored from the
// upstream source before this translation unit will compile.

namespace armnn_driver
{
namespace hal_1_2
{

// Returns true if this HAL 1.2 operation type also existed in HAL 1.0 and can
// therefore be delegated to the hal_1_0 policy unchanged.
// (The static_cast presumably targets V1_0::OperationType — see file NOTE.)
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast(operationType))
    {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::AVERAGE_POOL_2D:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::L2_POOL_2D:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MAX_POOL_2D:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

// Returns true if this HAL 1.2 operation type existed in HAL 1.1 (or 1.0) and
// can be delegated to the hal_1_1 policy.
bool HandledByV1_1(V1_2::OperationType operationType)
{
    // Everything 1.0 handles, 1.1 handles too.
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast(operationType))
    {
        case V1_1::OperationType::BATCH_TO_SPACE_ND:
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::SPACE_TO_BATCH_ND:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::SUB:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

// Convenience overloads that test a whole operation rather than its type enum.
bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}

bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}

// Reinterprets a 1.2 operation type as its 1.0 counterpart.
// Only valid for types for which HandledByV1_0() returned true.
V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast(type);
}

// Reinterprets a 1.2 operation type as its 1.1 counterpart.
// Only valid for types for which HandledByV1_1() returned true.
V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast(type);
}

// Down-converts a 1.2 operation to a 1.0 operation by copying the
// type (narrowed) and the input/output operand index lists.
V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
{
    V1_0::Operation op;
    op.type = CastToV1_0(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

// Down-converts a 1.2 operation to a 1.1 operation (same mechanism as above).
V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
{
    V1_1::Operation op;
    op.type = CastToV1_1(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

// Main dispatch for HAL 1.2 models: delegate to the 1.0/1.1 policies when the
// operation (and the whole model) is expressible at that level, otherwise
// handle the 1.2-only operations locally.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Delegate to the 1.0 policy when both the operation and the model are 1.0-compliant.
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    // Likewise for the 1.1 policy.
    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    // 1.2-specific operations (or ops whose model needs 1.2 features).
    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

// Converts an NNAPI CONV_2D operation (implicit- or explicit-padding form)
// into an armnn Convolution2d layer, followed by the fused activation.
// Returns false (via Fail) on any unsupported or malformed input.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form has 7 inputs, or >= 8 with a BOOL layout flag at index 7;
    // explicit form puts the layout flag at index 10.
    bool implicitPadding = operation.inputs.size() == 7 ||
                           (operation.inputs.size() >= 8 &&
                            GetInputOperand(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias.
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in]
    // but ArmNN expects the filter's height and width indices to match the input's
    // height and width indices, so we permute it to OIHW if the DataLayout is NCHW.
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
            ConvertOperationInputToConstTensorPin(operation, 1, model, data, OHWIToOIHW) :
            ConvertOperationInputToConstTensorPin(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }
    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keeps the bias quantization scale consistent with input/weights scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: padding scheme (3), strides (4,5), activation (6),
        // optional dilation starting at index 8.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Compute the explicit pad amounts from the scheme + kernel/input sizes,
        // reading W/H through the selected data layout's index mapping.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX,
                    desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY,
                    desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding: pads (3-6), strides (7,8), activation (9),
        // optional dilation starting at index 11.
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional biases(bias.GetInfo());

    // Ask the selected backends whether they support this configuration.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional(bias));
    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}

// Converts an NNAPI DEPTHWISE_CONV_2D operation into an armnn
// DepthwiseConvolution2d layer plus fused activation. The NNAPI filter layout
// [1, H, W, I*M] is reshaped/permuted into the layout ArmNN expects.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias.
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ].
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form has 8 inputs, or >= 9 with a BOOL layout flag at index 8.
    bool implicitPadding = operation.inputs.size() == 8 ||
                           (operation.inputs.size() >= 9 &&
                            GetInputOperand(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present.
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M = depth multiplier, derived from I*M / input channel count).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin(operation, 1, model, data, HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }
    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: padding scheme (3), strides (4,5), activation (7),
        // optional dilation starting at index 9. (Index 6, the depth
        // multiplier, is consumed via the weights reshape above.)
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the swizzle the weights are [M, I, H, W]: index 3 is W, index 2 is H.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX,
                    desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY,
                    desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: pads (3-6), strides (7,8), activation (10),
        // optional dilation starting at index 12.
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}

// Converts an NNAPI MAXIMUM operation into an armnn Maximum layer, with
// broadcasting of the two inputs handled by BroadcastTensor.
bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMaximumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
    assert(layer != nullptr);
    // BroadcastTensor also connects both inputs to the layer.
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// Converts an NNAPI MINIMUM operation into an armnn Minimum layer
// (mirror image of ConvertMaximum above).
bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMinimumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// PAD is version-independent; forward to the shared (global-namespace) helper.
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad(operation, model, data);
}

// Converts an NNAPI PAD_V2 operation (pad with an explicit pad value) into an
// armnn Pad layer. The pad-value operand type must match the tensor type:
// FLOAT16 / FLOAT32 / INT32 for f16 / f32 / quantised-u8 tensors respectively.
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType(operation, 0, model, operandType0) ||
        !GetOperandType(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }
        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t intPadValue = 0;
        if (!GetInputInt32(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// Converts an NNAPI PRELU operation into an armnn Prelu layer, broadcasting
// the alpha tensor against the input.
bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPreluSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               alphaInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

    BroadcastTensor(input, alpha, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// RELU / RELU1 / RELU6 are version-independent; forward to shared helpers.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6(operation, model, data);
}

// Shared converter for RESIZE_BILINEAR and RESIZE_NEAREST_NEIGHBOR.
// The target size may be given either as INT32 width/height (resize by shape)
// or as FLOAT32 scale factors (resize by scale); FLOAT16 scales are rejected.
bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType(operation, 1, model, operandType1) ||
        !GetOperandType(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
            !GetInputInt32(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth = static_cast(targetWidth);
        descriptor.m_TargetHeight = static_cast(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
            !GetInputFloat32(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        // Target size = floor(input size * scale), per axis.
        float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        descriptor.m_TargetWidth = std::floor(width * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// Converts an NNAPI SPACE_TO_DEPTH operation into an armnn SpaceToDepth layer.
// Only rank-4 inputs are accepted.
bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid() )
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::SpaceToDepthDescriptor desc;

    // NOTE(review): the return value of GetInputScalar is ignored here, unlike
    // everywhere else in this file — on failure m_BlockSize is left as-is.
    // Consider checking it; confirm intent against upstream.
    GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);

    // NOTE(review): this Fail call passes a "%s" format but no __func__
    // argument (unlike every sibling call) — the format placeholder has no
    // matching vararg. Looks like a latent bug; confirm against upstream.
    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be at least 1 in all dimensions");
    }

    desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// Converts an NNAPI SOFTMAX operation into an armnn Softmax layer.
// Reads the mandatory beta (input 1) and, in the 1.2 form, an optional
// axis scalar (input 2).
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Optional axis: only present when the operation carries a third input.
    if (operation.inputs.size() > 2 &&
        !GetInputScalar(operation, 2, HalPolicy::OperandType::INT32, desc.m_Axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

// TANH is version-independent; forward to the shared helper.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH(operation, model, data);
}

} // namespace hal_1_2
} // namespace armnn_driver