From 462728090eac533e3122080a86129541df128fe3 Mon Sep 17 00:00:00 2001
From: Mike Kelly <mike.kelly@arm.com>
Date: Wed, 14 Aug 2019 17:00:48 +0100
Subject: IVGCVSW-3633 Refactor HalPolicy to fully support V1.2 models

* Templated and moved V1.0 and V1.1 Convert methods to ensure they can
  work with later versions of models, operations and operands.
* The V1.2 HalPolicy no longer converts V1.2 models, operations and
  operands to earlier versions.
* The V1.2 HalPolicy no longer passes operations to the V1.1 or V1.0
  HalPolicies for conversion.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I5de59d43a3abb1f8ac0253dc637ad68318960c76
---
 ConversionUtils.hpp | 1045 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 1004 insertions(+), 41 deletions(-)

(limited to 'ConversionUtils.hpp')

diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 32efa540..cfbef5a8 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -14,6 +14,8 @@
 #include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
 #include "armnn/src/armnnUtils/Permute.hpp"
 
+#include "1.0/FullyConnected.hpp"
+
 #include
 #include
 #include
@@ -341,6 +343,20 @@ Shape GetOperandShape(const V1_0::Operand& operand)
     return shape;
 }
 
+#ifdef ARMNN_ANDROID_NN_V1_2
+
+Shape GetOperandShape(const V1_2::Operand& operand)
+{
+    Shape shape;
+    shape.type       = OperandType(operand.type);
+    shape.dimensions = operand.dimensions;
+    shape.scale      = operand.scale;
+    shape.offset     = operand.zeroPoint;
+    return shape;
+}
+
+#endif
+
 // ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
 // what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
 // we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
@@ -1417,6 +1433,71 @@ bool ConvertPooling2d(const HalOperation& operation,
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
 }
 
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsAdditionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo0,
+                               inputInfo1,
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
 template<typename HalPolicy,
          typename HalOperation = typename HalPolicy::Operation,
          typename HalModel     = typename HalPolicy::Model>
@@ -1918,41 +1999,25 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model
 }
 
 template<typename HalPolicy,
-         typename HalOperation = typename HalPolicy::Operation,
-         typename HalModel     = typename HalPolicy::Model>
-bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
 {
+    using Operand = typename HalPolicy::Operand;
+
     LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
     if (!input.IsValid())
     {
-        return Fail("%s: Operation has invalid inputs", __func__);
-    }
-
-    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
-    unsigned int rank = inputInfo.GetNumDimensions();
-
-    armnn::PadDescriptor descriptor;
-    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
-    {
-        return Fail("%s: Could not convert paddings", __func__);
-    }
-
-    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
-    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
-    // (QuantizationOffset - QuantizationOffset) * scale = 0.
-    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
-    {
-        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+        return Fail("%s: Operation has invalid input", __func__);
     }
 
-    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
-    if (!output)
+    const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
     {
-        return Fail("%s: Could not read output", __func__);
+        return Fail("%s: Operation has invalid outputs", __func__);
     }
 
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
     if (IsDynamicTensor(outputInfo))
     {
         return Fail("%s: Dynamic output tensors are not supported", __func__);
@@ -1960,31 +2025,30 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData&
 
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsPadSupported,
+                               IsDequantizeSupported,
                                data.m_Backends,
                                isSupported,
-                               inputInfo,
-                               outputInfo,
-                               descriptor);
+                               input.GetTensorInfo(),
+                               GetTensorInfoForOperand(*outputOperand));
     if (!isSupported)
     {
         return false;
     }
 
-    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
-    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
 
 template<typename HalPolicy,
          typename Operation = typename HalPolicy::Operation,
          typename Model     = typename HalPolicy::Model>
-bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
 {
+    using Operand = typename HalPolicy::Operand;
+
     LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
     LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
 
@@ -2015,7 +2079,7 @@ bool ConvertSub(const Operation& operation, const Model& model, ConversionData&
 
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsSubtractionSupported,
+                               IsDivisionSupported,
                                data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
@@ -2026,21 +2090,920 @@ bool ConvertSub(const Operation& operation, const Model& model, ConversionData&
         return false;
     }
 
-    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
-    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
-
-    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
-    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
 
     if (endLayer)
     {
         BroadcastTensor(input0, input1, startLayer, *data.m_Network);
         return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
     }
-
     return Fail("%s: ProcessActivation failed", __func__);
 }
 
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFloorSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // ArmNN does not currently support non-fixed weights or bias
+    ConstTensorPin weightsPin =
+        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data); // 2D
+    ConstTensorPin biasPin =
+        ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D
+
+    if (!weightsPin.IsValid() || !biasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::ConstTensor weights = weightsPin.GetConstTensor();
+    armnn::ConstTensor bias    = biasPin.GetConstTensor();
+    armnn::TensorInfo reshapedInfo = inputInfo;
+
+    try
+    {
+        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
+    } catch (const std::exception &e) {
+        return Fail("%s: %s", __func__, e.what());
+    }
+
+    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
+    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
+
+    ActivationFn activationFunction;
+    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::FullyConnectedDescriptor desc;
+    desc.m_TransposeWeightMatrix = true;
+    desc.m_BiasEnabled           = true;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFullyConnectedSupported,
+                               data.m_Backends,
+                               isSupported,
+                               reshapedInfo,
+                               outputInfo,
+                               weights.GetInfo(),
+                               bias.GetInfo(),
+                               desc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer =
+        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
+    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    if (endLayer != nullptr)
+    {
+        if (inputInfo.GetNumDimensions() > 2U)
+        {
+            armnn::ReshapeDescriptor reshapeDescriptor;
+            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
+
+            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+            assert(reshapeLayer != nullptr);
+            input.Connect(reshapeLayer->GetInputSlot(0));
+            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+        }
+        else
+        {
+            input.Connect(startLayer->GetInputSlot(0));
+        }
+
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+    if (outputInfo.GetNumDimensions() != 4u)
+    {
+        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
+    }
+
+    armnn::L2NormalizationDescriptor desc;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsL2NormalizationSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               desc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertLocalResponseNormalization(const Operation& operation,
+                                       const Model& model,
+                                       ConversionData& data)
+{
+    using Operand     = typename HalPolicy::Operand;
+    using OperandType = typename HalPolicy::OperandType;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+    if (outputInfo.GetNumDimensions() != 4u)
+    {
+        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
+    }
+
+    armnn::NormalizationDescriptor descriptor;
+    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
+    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+
+    if (!input.IsValid() ||
+        !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
+        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
+        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
+        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // ArmNN expects normSize to be the full size of the normalization
+    // window rather than the radius as in AndroidNN.
+    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsNormalizationSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::Sigmoid;
+
+    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    if (!axisOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+
+    std::vector<int32_t> axis;
+    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    // Convert the axis to unsigned int and remove duplicates.
+    unsigned int rank = inputInfo.GetNumDimensions();
+    std::set<unsigned int> uniqueAxis;
+    std::transform(axis.begin(), axis.end(),
+                   std::inserter(uniqueAxis, uniqueAxis.begin()),
+                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
+
+    // Get the "keep dims" flag.
+    int32_t keepDims = 0;
+    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
+    {
+        return Fail("%s: Could not read input 2", __func__);
+    }
+
+    armnn::MeanDescriptor descriptor;
+    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
+    descriptor.m_KeepDims = keepDims > 0;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsMeanSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
+
+    if (outputOperand == nullptr)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsMultiplicationSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer != nullptr)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+    else
+    {
+        return Fail("%s: ProcessActivation failed", __func__);
+    }
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    armnn::PadDescriptor descriptor;
+    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
+    {
+        return Fail("%s: Could not convert paddings", __func__);
+    }
+
+    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
+    // value must be "logical zero"; we set it equal to the QuantizationOffset, so effectively it ends up as
+    // (QuantizationOffset - QuantizationOffset) * scale = 0.
+    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+    {
+        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPadSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    const Operand* inputOperand          = GetInputOperand<HalPolicy>(operation, 0, model);
+    const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    const Operand* outputOperand         = GetOutputOperand<HalPolicy>(operation, 0, model);
+
+    if (inputOperand == nullptr
+        || requestedShapeOperand == nullptr
+        || outputOperand == nullptr)
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (requestedShapeOperand->dimensions.size() != 1)
+    {
+        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
+                    __func__, requestedShapeOperand->dimensions.size());
+    }
+
+    std::vector<int32_t> targetDimensions;
+    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
+    {
+        return Fail("%s: Could not read values of input 1", __func__);
+    }
+
+    const Shape inputOperandShape = GetOperandShape(*inputOperand);
+
+    Shape requestedShape;
+    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
+    // function that resolves these values into a fully specified tensor shape.
+    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
+    {
+        return Fail("%s: Failed to resolve the requested shape", __func__);
+    }
+
+    const Shape outputOperandShape = GetOperandShape(*outputOperand);
+    if (!SameShape(requestedShape, outputOperandShape))
+    {
+        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
+                                                         requestedShape.dimensions.data());
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               reshapeDescriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSubtractionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
+
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
+
+    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    std::vector<int32_t> axis;
+    if (!axisOperand)
+    {
+        axis.assign(dimensionSequence,
+                    dimensionSequence + rank);
+    }
+    else
+    {
+        GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
+    }
+
+    std::vector<unsigned int> outputDims;
+    for (unsigned int i = 0; i < rank; i++)
+    {
+        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
+        auto currentDimension = inputInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
+
+    armnn::TensorInfo outputInfo = inputInfo;
+    outputInfo.SetShape(outShape);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputInfo.GetShape();
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               reshapeDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const Operand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
+    const Operand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
+    const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
+
+    std::vector<int32_t> beginValues;
+    std::vector<int32_t> endValues;
+    std::vector<int32_t> stridesValues;
+
+    // The lengths of the beginOperand, endOperand and stridesOperand must all equal the rank of the input
+    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
+    {
+        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
+        {
+            return false;
+        }
+
+        if (operandValues.size() != rank)
+        {
+            return false;
+        }
+
+        return true;
+    };
+
+    if (!ValidateInputOperands(*beginOperand, beginValues)
+        || !ValidateInputOperands(*endOperand, endValues)
+        || !ValidateInputOperands(*stridesOperand, stridesValues))
+    {
+        return Fail("%s: Operation has invalid input operand", __func__);
+    }
+
+    // Stride cannot have value '0'
+    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
+    {
+        return Fail("%s: Stride must be non-zero value.", __func__);
+    }
+
+    armnn::StridedSliceDescriptor descriptor;
+    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
+    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
+    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
+    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
+        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
+        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsStridedSliceSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
+
+    std::vector<int32_t> perm(rank);
+    if (!permOperand)
+    {
+        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
+        for (unsigned int i = rank; i > 0; i--)
+        {
+            perm[rank - i] = boost::numeric_cast<int> (i - 1);
+        }
+    }
+    else
+    {
+        GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
+    }
+
+    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
+
+    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
+    if (!permutationVector.IsEqual(NHWCToArmNN)
+        && !permutationVector.IsEqual(ArmNNToNHWC)
+        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
+    {
+        return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
+    }
+
+    armnn::PermuteDescriptor permuteDesc;
+    permuteDesc.m_DimMappings = permutationVector;
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPermuteSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               permuteDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
 template
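For context, the pattern this patch enables: a HAL-version-specific policy instantiates
the shared templates above with itself as the HalPolicy parameter, so V1.2 operands flow
through GetOperandShape(const V1_2::Operand&) and the other templated helpers directly,
with no downcast to V1.0/V1.1 models. A minimal sketch of such a dispatch, assuming a
hal_1_2::HalPolicy with a ConvertOperation entry point (the wrapper name and case list
here are illustrative, not part of this diff):

    // Illustrative only: how a V1.2 policy could dispatch to the templated
    // conversion functions defined in ConversionUtils.hpp.
    namespace hal_1_2
    {

    bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
    {
        switch (operation.type)
        {
            // Each case instantiates the shared template with this policy, so
            // V1.2 operations and operands are converted in place rather than
            // being handed to the V1.0/V1.1 policies.
            case V1_2::OperationType::ADD:  return ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
            case V1_2::OperationType::SUB:  return ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
            case V1_2::OperationType::MEAN: return ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
            default:
                return Fail("%s: Operation type %s not supported in ArmnnDriver",
                            __func__, toString(operation.type).c_str());
        }
    }

    } // namespace hal_1_2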