From 462728090eac533e3122080a86129541df128fe3 Mon Sep 17 00:00:00 2001 From: Mike Kelly Date: Wed, 14 Aug 2019 17:00:48 +0100 Subject: IVGCVSW-3633 Refactor HalPolicy to fully support V1.2 models * Templated and moved V1.0 and V1.1 Convert methods to ensure they can work with later versions of models, operations and operands. * The V1.2 HalPolicy no longer converts V1.2 models, operations and operands to earlier versions. * The V1.2 HalPolicy no longer passes operations to the V1.1 or V1.0 HalPolicies for conversion. Signed-off-by: Mike Kelly Change-Id: I5de59d43a3abb1f8ac0253dc637ad68318960c76 --- 1.0/HalPolicy.cpp | 468 +---------------------- 1.1/HalPolicy.cpp | 374 +----------------- 1.2/HalPolicy.cpp | 220 ++++++----- 1.2/HalPolicy.hpp | 32 +- ConversionUtils.hpp | 1045 +++++++++++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 1165 insertions(+), 974 deletions(-) diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp index 6c8dcb5d..cff678a8 100644 --- a/1.0/HalPolicy.cpp +++ b/1.0/HalPolicy.cpp @@ -74,63 +74,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertAdd()"); - - LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data); - LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data); - - if (!input0.IsValid() || !input1.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - // The FuseActivation parameter is always the input index 2 - // and it should be optional - ActivationFn activationFunction; - if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data)) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* outputOperand = GetOutputOperand(operation, 0, model); - if (!outputOperand) - { - return false; - } - - const armnn::TensorInfo& inputInfo0 = 
input0.GetTensorInfo(); - const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo(); - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsAdditionSupported, - data.m_Backends, - isSupported, - inputInfo0, - inputInfo1, - outputInfo); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer(); - armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); - - if (endLayer != nullptr) - { - BroadcastTensor(input0, input1, startLayer, *data.m_Network); - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); - } - else - { - return Fail("%s: ProcessActivation failed", __func__); - } + return ::ConvertAdd(operation, model, data); } bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data) @@ -160,187 +104,19 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertDequantize()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid input", __func__); - } - - const Operand* const outputOperand = GetOutputOperand(operation, 0, model); - if (!outputOperand) - { - return Fail("%s: Operation has invalid outputs", __func__); - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - 
IsDequantizeSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - GetTensorInfoForOperand(*outputOperand)); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer(); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertDequantize(operation, model, data); } bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertFloor()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* const outputOperand = GetOutputOperand(operation, 0, model); - if (!outputOperand) - { - return Fail("%s: Operation has invalid outputs", __func__); - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsFloorSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - outputInfo); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer(); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertFloor(operation, model, data); } bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* output = 
GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - // ArmNN does not currently support non-fixed weights or bias - ConstTensorPin weightsPin = - ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D - ConstTensorPin biasPin = - ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D - - if (!weightsPin.IsValid() || !biasPin.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - armnn::ConstTensor weights = weightsPin.GetConstTensor(); - armnn::ConstTensor bias = biasPin.GetConstTensor(); - armnn::TensorInfo reshapedInfo = inputInfo; - - try - { - reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape())); - } catch (const std::exception &e) { - return Fail("%s: %s", __func__, e.what()); - } - - // ensuring that the bias value is within 1% of the weights input (small float differences can exist) - SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo); - - ActivationFn activationFunction; - if (!GetInputActivationFunction(operation, 3, activationFunction, model, data)) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - armnn::FullyConnectedDescriptor desc; - desc.m_TransposeWeightMatrix = true; - desc.m_BiasEnabled = true; - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsFullyConnectedSupported, - data.m_Backends, - isSupported, - reshapedInfo, - outputInfo, - weights.GetInfo(), - bias.GetInfo(), - desc); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* startLayer = - data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional(bias)); - 
armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); - - if (endLayer != nullptr) - { - if (inputInfo.GetNumDimensions() > 2U) - { - armnn::ReshapeDescriptor reshapeDescriptor; - reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape(); - - armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor); - assert(reshapeLayer != nullptr); - input.Connect(reshapeLayer->GetInputSlot(0)); - reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo); - reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0)); - } - else - { - input.Connect(startLayer->GetInputSlot(0)); - } - - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); - } - else - { - return Fail("%s: ProcessActivation failed", __func__); - } + return ::ConvertFullyConnected(operation, model, data); } bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation, @@ -348,74 +124,13 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* output = GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - armnn::NormalizationDescriptor descriptor; - descriptor.m_DataLayout = armnn::DataLayout::NHWC; - descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across; - descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness; - - 
if (!input.IsValid() || - !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) || - !GetInputFloat32(operation, 2, descriptor.m_K, model, data) || - !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) || - !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data)) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - // ArmNN expects normSize to be the full size of the normalization - // window rather than the radius as in AndroidNN. - descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize); - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsNormalizationSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); - if (!isSupported) - { - return false; - } - - - armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertLocalResponseNormalization(operation, model, data); } bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertLogistic()"); - - armnn::ActivationDescriptor desc; - desc.m_Function = armnn::ActivationFunction::Sigmoid; - - return ConvertToActivation(operation, __func__, desc, model, data); + return ::ConvertLogistic(operation, model, data); } bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data) @@ -775,48 +490,7 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", 
__func__); - } - - const Operand* output = GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - armnn::L2NormalizationDescriptor desc; - desc.m_DataLayout = armnn::DataLayout::NHWC; - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsL2NormalizationSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertL2Normalization(operation, model, data); } bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data) @@ -834,64 +508,7 @@ bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertMul()"); - - LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data); - LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data); - - if (!input0.IsValid() || !input1.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - // The FuseActivation parameter is always the input index 2 - // and it should be optional - ActivationFn activationFunction; - if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data)) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* outputOperand = GetOutputOperand(operation, 0, model); - - 
if (outputOperand == nullptr) - { - return false; - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsMultiplicationSupported, - data.m_Backends, - isSupported, - input0.GetTensorInfo(), - input1.GetTensorInfo(), - outputInfo); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer(); - armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); - - const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo(); - const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo(); - - if (endLayer != nullptr) - { - BroadcastTensor(input0, input1, startLayer, *data.m_Network); - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); - } - else - { - return Fail("%s: ProcessActivation failed", __func__); - } + return ::ConvertMul(operation, model, data); } bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data) @@ -1029,74 +646,7 @@ bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, Conv bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_0::HalPolicy::ConvertReshape()"); - - const Operand* inputOperand = GetInputOperand(operation, 0, model); - const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model); - const Operand* outputOperand = GetOutputOperand(operation, 0, model); - - if (inputOperand == nullptr - || requestedShapeOperand == nullptr - || outputOperand == nullptr) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - - if (requestedShapeOperand->dimensions.size() != 1) - { - return Fail("%s: Input 1 expected to be 
one-dimensional (found %i dimensions)", - __func__, requestedShapeOperand->dimensions.size()); - } - - std::vector targetDimensions; - if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data)) - { - return Fail("%s: Could not read values of input 1", __func__); - } - - const Shape inputOperandShape = GetOperandShape(*inputOperand); - - Shape requestedShape; - // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility - // function that resolves these values into a fully specified tensor shape. - if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape)) - { - return Fail("%s: Failed to resolve the requested shape", __func__); - } - - const Shape outputOperandShape = GetOperandShape(*outputOperand); - if (!SameShape(requestedShape, outputOperandShape)) - { - return Fail("%s: Shape of output operand does not match resolved requested shape", __func__); - } - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Could not read input 0", __func__); - } - - armnn::ReshapeDescriptor reshapeDescriptor; - reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(), - requestedShape.dimensions.data()); - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsReshapeSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - reshapeDescriptor); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertReshape(operation, model, data); } bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data) diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp index 
e75b5c2a..aa650e90 100644 --- a/1.1/HalPolicy.cpp +++ b/1.1/HalPolicy.cpp @@ -106,61 +106,7 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_1::HalPolicy::ConvertDiv()"); - - LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data); - LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data); - - if (!input0.IsValid() || !input1.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - // The FuseActivation parameter is always the input index 2 - // and it should be optional - ActivationFn activationFunction; - if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data)) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* output = GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsDivisionSupported, - data.m_Backends, - isSupported, - input0.GetTensorInfo(), - input1.GetTensorInfo(), - outputInfo); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer(); - armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); - - const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo(); - const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo(); - - if (endLayer) - { - BroadcastTensor(input0, input1, startLayer, *data.m_Network); - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); - } - - return Fail("%s: 
ProcessActivation failed", __func__); + return ::ConvertDiv(operation, model, data); } bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data) @@ -172,75 +118,7 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_1::HalPolicy::ConvertMean()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const Operand* output = GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - const Operand* axisOperand = GetInputOperand(operation, 1, model); - if (!axisOperand) - { - return Fail("%s: Could not read input 1", __func__); - } - - std::vector axis; - if (!GetTensorInt32Values(*axisOperand, axis, model, data)) - { - return Fail("%s: Input 1 has invalid values", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - - // Convert the axis to unsigned int and remove duplicates. - unsigned int rank = inputInfo.GetNumDimensions(); - std::set uniqueAxis; - std::transform(axis.begin(), axis.end(), - std::inserter(uniqueAxis, uniqueAxis.begin()), - [rank](int i) -> unsigned int { return (i + rank) % rank; }); - - // Get the "keep dims" flag. 
- int32_t keepDims = 0; - if (!GetInputInt32(operation, 2, keepDims, model, data)) - { - return Fail("%s: Could not read input 2", __func__); - } - - armnn::MeanDescriptor descriptor; - descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end()); - descriptor.m_KeepDims = keepDims > 0; - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsMeanSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertMean(operation, model, data); } bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data) @@ -258,261 +136,19 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_1::HalPolicy::ConvertSqueeze()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - unsigned int rank = inputInfo.GetNumDimensions(); - if (rank > 4) - { - Fail("%s: Inputs with rank greater than 4 are not supported", __func__); - } - - const Operand* output = GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - if (IsDynamicTensor(GetTensorInfoForOperand(*output))) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure - // if the operand index is out of bounds. 
- const Operand* axisOperand = GetInputOperand(operation, 1, model, false); - - const uint32_t dimensionSequence[] = { 0, 1, 2, 3 }; - - std::vector axis; - if (!axisOperand) - { - axis.assign(dimensionSequence, - dimensionSequence + rank); - } - else - { - GetTensorInt32Values(*axisOperand, axis, model, data); - } - - - std::vector outputDims; - for (unsigned int i = 0; i < rank; i++) - { - bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end()); - auto currentDimension = inputInfo.GetShape()[i]; - if (skipSqueeze || currentDimension != 1) - { - outputDims.push_back(currentDimension); - } - } - - armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data()); - - armnn::TensorInfo outputInfo = inputInfo; - outputInfo.SetShape(outShape); - - armnn::ReshapeDescriptor reshapeDesc; - reshapeDesc.m_TargetShape = outputInfo.GetShape(); - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsReshapeSupported, - data.m_Backends, - isSupported, - inputInfo, - reshapeDesc); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertSqueeze(operation, model, data); } bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_1::HalPolicy::ConvertStridedSlice()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - unsigned int rank = inputInfo.GetNumDimensions(); - if (rank > 4) - { - Fail("%s: Inputs with rank greater than 4 are not supported", __func__); - } - - const Operand* output = GetOutputOperand(operation, 0, model); - if 
(!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - - const Operand* beginOperand = GetInputOperand(operation, 1, model); - const Operand* endOperand = GetInputOperand(operation, 2, model); - const Operand* stridesOperand = GetInputOperand(operation, 3, model); - - std::vector beginValues; - std::vector endValues; - std::vector stridesValues; - - // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input) - auto ValidateInputOperands = [&] (const Operand& operand, std::vector& operandValues) - { - if (!GetTensorInt32Values(operand, operandValues, model, data)) - { - return false; - } - - if (operandValues.size() != rank) - { - return false; - } - - return true; - }; - - if (!ValidateInputOperands(*beginOperand, beginValues) - || !ValidateInputOperands(*endOperand, endValues) - || !ValidateInputOperands(*stridesOperand, stridesValues)) - { - return Fail("%s: Operation has invalid input operand", __func__); - } - - // Stride cannot have value '0' - if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; })) - { - return Fail("%s: Stride must be non-zero value.", __func__); - } - - armnn::StridedSliceDescriptor descriptor; - descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend()); - descriptor.m_End.assign(endValues.cbegin(), endValues.cend()); - descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend()); - descriptor.m_DataLayout = armnn::DataLayout::NHWC; - - // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags - if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) || - !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) || - !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data)) - { - return Fail("%s: 
Operation has invalid inputs", __func__); - } - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsStridedSliceSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertStridedSlice(operation, model, data); } bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_1::HalPolicy::ConvertTranspose()"); - - LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); - if (!input.IsValid()) - { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - unsigned int rank = inputInfo.GetNumDimensions(); - if (rank > 4) - { - Fail("%s: Inputs with rank greater than 4 are not supported", __func__); - } - - // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure - // if the operand index is out of bounds. 
- const Operand* permOperand = GetInputOperand(operation, 1, model, false); - - std::vector perm(rank); - if (!permOperand) - { - // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor - for (unsigned int i = rank; i > 0; i--) - { - perm[rank - i] = boost::numeric_cast (i - 1); - } - } - else - { - GetTensorInt32Values(*permOperand, perm, model, data); - } - - std::vector outputDims(perm.begin(), perm.begin() + rank); - - auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size()); - if (!permutationVector.IsEqual(NHWCToArmNN) - && !permutationVector.IsEqual(ArmNNToNHWC) - && !permutationVector.IsEqual({ 3, 2, 0, 1 })) - { - return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__); - } - - armnn::PermuteDescriptor permuteDesc; - permuteDesc.m_DimMappings = permutationVector; - - const Operand* output = GetOutputOperand(operation, 0, model); - if (!output) - { - return Fail("%s: Could not read output 0", __func__); - } - - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsPermuteSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - permuteDesc); - if (!isSupported) - { - return false; - } - - armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return ::ConvertTranspose(operation, model, data); } bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data) diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp index c8e242e0..7fe5f88e 100644 --- a/1.2/HalPolicy.cpp +++ b/1.2/HalPolicy.cpp @@ -7,9 +7,6 @@ #include "Utils.hpp" -#include "../1.0/HalPolicy.hpp" -#include "../1.1/HalPolicy.hpp" - #include #include @@ -20,109 
+17,12 @@ namespace armnn_driver namespace hal_1_2 { -bool HandledByV1_0(V1_2::OperationType operationType) -{ - switch (static_cast(operationType)) - { - case V1_0::OperationType::ADD: - case V1_0::OperationType::DEPTH_TO_SPACE: - case V1_0::OperationType::DEQUANTIZE: - case V1_0::OperationType::EMBEDDING_LOOKUP: - case V1_0::OperationType::FLOOR: - case V1_0::OperationType::FULLY_CONNECTED: - case V1_0::OperationType::HASHTABLE_LOOKUP: - case V1_0::OperationType::L2_NORMALIZATION: - case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION: - case V1_0::OperationType::LOGISTIC: - case V1_0::OperationType::LSH_PROJECTION: - case V1_0::OperationType::MUL: - case V1_0::OperationType::RESHAPE: - case V1_0::OperationType::RNN: - case V1_0::OperationType::SVDF: - case V1_0::OperationType::OEM_OPERATION: - return true; - default: - return false; - } -} - -bool HandledByV1_1(V1_2::OperationType operationType) -{ - if (HandledByV1_0(operationType)) - { - return true; - } - switch (static_cast(operationType)) - { - case V1_1::OperationType::DIV: - case V1_1::OperationType::MEAN: - case V1_1::OperationType::SQUEEZE: - case V1_1::OperationType::STRIDED_SLICE: - case V1_1::OperationType::TRANSPOSE: - return true; - default: - return false; - } -} - -bool HandledByV1_0(const V1_2::Operation& operation) -{ - return HandledByV1_0(operation.type); -} - -bool HandledByV1_1(const V1_2::Operation& operation) -{ - return HandledByV1_1(operation.type); -} - -V1_0::OperationType CastToV1_0(V1_2::OperationType type) -{ - return static_cast(type); -} - -V1_1::OperationType CastToV1_1(V1_2::OperationType type) -{ - return static_cast(type); -} - -V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation) -{ - V1_0::Operation op; - op.type = CastToV1_0(operation.type); - op.inputs = operation.inputs; - op.outputs = operation.outputs; - return op; -} - -V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation) -{ - V1_1::Operation op; - op.type = CastToV1_1(operation.type); - 
op.inputs = operation.inputs; - op.outputs = operation.outputs; - return op; -} - bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data) { - if (HandledByV1_0(operation) && compliantWithV1_0(model)) - { - hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation); - hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model); - - return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data); - } - - if (HandledByV1_1(operation) && compliantWithV1_1(model)) - { - hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation); - hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model); - - return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data); - } - switch (operation.type) { + case V1_2::OperationType::ADD: + return ConvertAdd(operation, model, data); case V1_2::OperationType::AVERAGE_POOL_2D: return ConvertAveragePool2d(operation, model, data); case V1_2::OperationType::BATCH_TO_SPACE_ND: @@ -133,14 +33,34 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertConv2d(operation, model, data); case V1_2::OperationType::DEPTHWISE_CONV_2D: return ConvertDepthwiseConv2d(operation, model, data); + case V1_2::OperationType::DEQUANTIZE: + return ConvertDequantize(operation, model, data); + case V1_2::OperationType::DIV: + return ConvertDiv(operation, model, data); + case V1_2::OperationType::FLOOR: + return ConvertFloor(operation, model, data); + case V1_2::OperationType::FULLY_CONNECTED: + return ConvertFullyConnected(operation, model, data); + case V1_2::OperationType::L2_NORMALIZATION: + return ConvertL2Normalization(operation, model, data); case V1_2::OperationType::L2_POOL_2D: return ConvertL2Pool2d(operation, model, data); + case V1_2::OperationType::LOCAL_RESPONSE_NORMALIZATION: + return ConvertLocalResponseNormalization(operation, model, data); + case V1_2::OperationType::LOGISTIC: + return ConvertLogistic(operation, model, 
data); + case V1_2::OperationType::LSTM: + return ConvertLstm(operation, model, data); case V1_2::OperationType::MAX_POOL_2D: return ConvertMaxPool2d(operation, model, data); case V1_2::OperationType::MAXIMUM: return ConvertMaximum(operation, model, data); + case V1_2::OperationType::MEAN: + return ConvertMean(operation, model, data); case V1_2::OperationType::MINIMUM: return ConvertMinimum(operation, model, data); + case V1_2::OperationType::MUL: + return ConvertMul(operation, model, data); case V1_2::OperationType::PAD: return ConvertPad(operation, model, data); case V1_2::OperationType::PAD_V2: @@ -157,10 +77,18 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertReLu1(operation, model, data); case V1_2::OperationType::RELU6: return ConvertReLu6(operation, model, data); + case V1_2::OperationType::RESHAPE: + return ConvertReshape(operation, model, data); case V1_2::OperationType::RESIZE_BILINEAR: return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear); case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR: return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor); + case V1_2::OperationType::SQUEEZE: + return ConvertSqueeze(operation, model, data); + case V1_2::OperationType::STRIDED_SLICE: + return ConvertStridedSlice(operation, model, data); + case V1_2::OperationType::TRANSPOSE: + return ConvertTranspose(operation, model, data); case V1_2::OperationType::TRANSPOSE_CONV_2D: return ConvertTransposeConv2d(operation, model, data); case V1_2::OperationType::SOFTMAX: @@ -173,14 +101,18 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, return ConvertSub(operation, model, data); case V1_2::OperationType::TANH: return ConvertTanH(operation, model, data); - case V1_2::OperationType::LSTM: - return ConvertLstm(operation, model, data); default: return Fail("%s: Operation type %s not supported in ArmnnDriver", __func__, 
toString(operation.type).c_str()); } } +bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertAdd()"); + return ::ConvertAdd(operation, model, data); +} + bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()"); @@ -517,12 +449,56 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); } +bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertDequantize()"); + return ::ConvertDequantize(operation, model, data); +} + +bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertDiv()"); + return ::ConvertDiv(operation, model, data); +} + +bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertFloor()"); + return ::ConvertFloor(operation, model, data); +} + +bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertFullyConnected()"); + return ::ConvertFullyConnected(operation, model, data); +} + +bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertL2Normalization()"); + return ::ConvertL2Normalization(operation, model, data); +} + bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()"); return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data); } +bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation, + const Model& 
model, + ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertLocalResponseNormalization()"); + return ::ConvertLocalResponseNormalization(operation, model, data); +} + +bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertLogistic()"); + return ::ConvertLogistic(operation, model, data); +} + bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()"); @@ -574,6 +550,12 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); } +bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertMean()"); + return ::ConvertMean(operation, model, data); +} + bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_2::HalPolicy::ConvertMinimum()"); @@ -619,6 +601,12 @@ bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, C return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); } +bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertMul()"); + return ::ConvertMul(operation, model, data); +} + bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data) { ALOGV("hal_1_2::HalPolicy::ConvertPad()"); @@ -1039,6 +1027,12 @@ bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, Con return ::ConvertReLu6(operation, model, data); } +bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertReshape()"); + return ::ConvertReshape(operation, model, data); +} + bool HalPolicy::ConvertResize(const Operation& 
operation, const Model& model, ConversionData& data, @@ -1733,6 +1727,24 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data)); } +bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertSqueeze()"); + return ::ConvertSqueeze(operation, model, data); +} + +bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertStridedSlice()"); + return ::ConvertStridedSlice(operation, model, data); +} + +bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data) +{ + ALOGV("hal_1_2::HalPolicy::ConvertTranspose()"); + return ::ConvertTranspose(operation, model, data); +} + bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data) { LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp index a268b3de..4a785d9e 100644 --- a/1.2/HalPolicy.hpp +++ b/1.2/HalPolicy.hpp @@ -31,6 +31,8 @@ public: static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data); private: + static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data); @@ -41,14 +43,36 @@ private: static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data); + + static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data); + + static bool 
ConvertFloor(const Operation& operation, const Model& model, ConversionData& data); + + static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data); + + static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertLocalResponseNormalization(const Operation& operation, + const Model& model, + ConversionData& data); + + static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data); + + static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data); @@ -65,6 +89,8 @@ private: static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertResize(const Operation& operation, const Model& model, ConversionData& data, @@ -76,11 +102,15 @@ private: static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data); + + static bool 
ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data); - static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data); + static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data); static bool ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data); }; diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index 32efa540..cfbef5a8 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -14,6 +14,8 @@ #include "armnn/src/armnnUtils/DataLayoutIndexed.hpp" #include "armnn/src/armnnUtils/Permute.hpp" +#include "1.0/FullyConnected.hpp" + #include #include #include @@ -341,6 +343,20 @@ Shape GetOperandShape(const V1_0::Operand& operand) return shape; } +#ifdef ARMNN_ANDROID_NN_V1_2 + +Shape GetOperandShape(const V1_2::Operand& operand) +{ + Shape shape; + shape.type = OperandType(operand.type); + shape.dimensions = operand.dimensions; + shape.scale = operand.scale; + shape.offset = operand.zeroPoint; + return shape; +} + +#endif + // ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also // what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so // we accept some tolerance. 
We don't want ArmNN itself to accept these inconsistencies as it is up to the @@ -1417,6 +1433,71 @@ bool ConvertPooling2d(const HalOperation& operation, return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); } +template +bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data); + LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data); + + if (!input0.IsValid() || !input1.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + // The FuseActivation parameter is always the input index 2 + // and it should be optional + ActivationFn activationFunction; + if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data)) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* outputOperand = GetOutputOperand(operation, 0, model); + if (!outputOperand) + { + return false; + } + + const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo(); + const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo(); + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsAdditionSupported, + data.m_Backends, + isSupported, + inputInfo0, + inputInfo1, + outputInfo); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer(); + armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); + + if (endLayer != nullptr) + { + BroadcastTensor(input0, input1, startLayer, *data.m_Network); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + } + else + { + 
return Fail("%s: ProcessActivation failed", __func__); + } +} + template @@ -1918,41 +1999,25 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model } template -bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data) + typename Operation = typename HalPolicy::Operation, + typename Model = typename HalPolicy::Model> +bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data) { + using Operand = typename HalPolicy::Operand; + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); if (!input.IsValid()) { - return Fail("%s: Operation has invalid inputs", __func__); - } - - const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); - unsigned int rank = inputInfo.GetNumDimensions(); - - armnn::PadDescriptor descriptor; - if (!ConvertPaddings(operation, model, data, rank, descriptor)) - { - return Fail("%s: Could not convert paddings", __func__); - } - - // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad - // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as - // (QuantizationOffset - QuantizationOffset) * scale = 0. 
- if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8) - { - descriptor.m_PadValue = inputInfo.GetQuantizationOffset(); + return Fail("%s: Operation has invalid input", __func__); } - const HalOperand* output = GetOutputOperand(operation, 0, model); - if (!output) + const Operand* const outputOperand = GetOutputOperand(operation, 0, model); + if (!outputOperand) { - return Fail("%s: Could not read output", __func__); + return Fail("%s: Operation has invalid outputs", __func__); } - const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); if (IsDynamicTensor(outputInfo)) { return Fail("%s: Dynamic output tensors are not supported", __func__); @@ -1960,31 +2025,30 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& bool isSupported = false; FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsPadSupported, + IsDequantizeSupported, data.m_Backends, isSupported, - inputInfo, - outputInfo, - descriptor); + input.GetTensorInfo(), + GetTensorInfoForOperand(*outputOperand)); if (!isSupported) { return false; } - armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor); + armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer(); assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - layer->GetOutputSlot(0).SetTensorInfo(outputInfo); return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); } template -bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data) +bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data) { + using Operand = typename HalPolicy::Operand; + LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data); LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data); @@ -2015,7 +2079,7 @@ bool ConvertSub(const Operation& operation, const Model& model, ConversionData& 
bool isSupported = false; FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsSubtractionSupported, + IsDivisionSupported, data.m_Backends, isSupported, input0.GetTensorInfo(), @@ -2026,21 +2090,920 @@ bool ConvertSub(const Operation& operation, const Model& model, ConversionData& return false; } - armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer(); - armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); - - const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo(); - const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo(); + armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer(); + armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); if (endLayer) { BroadcastTensor(input0, input1, startLayer, *data.m_Network); return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); } - return Fail("%s: ProcessActivation failed", __func__); } +template +bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* const outputOperand = GetOutputOperand(operation, 0, model); + if (!outputOperand) + { + return Fail("%s: Operation has invalid outputs", __func__); + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsFloorSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* layer = 
data.m_Network->AddFloorLayer(); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + +template +bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* output = GetOutputOperand(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + // ArmNN does not currently support non-fixed weights or bias + ConstTensorPin weightsPin = + ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D + ConstTensorPin biasPin = + ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D + + if (!weightsPin.IsValid() || !biasPin.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + armnn::ConstTensor weights = weightsPin.GetConstTensor(); + armnn::ConstTensor bias = biasPin.GetConstTensor(); + armnn::TensorInfo reshapedInfo = inputInfo; + + try + { + reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape())); + } catch (const std::exception &e) { + return Fail("%s: %s", __func__, e.what()); + } + + // ensuring that the bias value is within 1% of the weights input (small float differences can exist) + SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo); + + ActivationFn activationFunction; + if (!GetInputActivationFunction(operation, 3, activationFunction, model, data)) + { + return 
Fail("%s: Operation has invalid inputs", __func__); + } + + armnn::FullyConnectedDescriptor desc; + desc.m_TransposeWeightMatrix = true; + desc.m_BiasEnabled = true; + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsFullyConnectedSupported, + data.m_Backends, + isSupported, + reshapedInfo, + outputInfo, + weights.GetInfo(), + bias.GetInfo(), + desc); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* startLayer = + data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional(bias)); + armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); + + if (endLayer != nullptr) + { + if (inputInfo.GetNumDimensions() > 2U) + { + armnn::ReshapeDescriptor reshapeDescriptor; + reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape(); + + armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor); + assert(reshapeLayer != nullptr); + input.Connect(reshapeLayer->GetInputSlot(0)); + reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo); + reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0)); + } + else + { + input.Connect(startLayer->GetInputSlot(0)); + } + + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + } + else + { + return Fail("%s: ProcessActivation failed", __func__); + } +} + +template +bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* output = GetOutputOperand(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + + if 
(IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + if (outputInfo.GetNumDimensions() != 4u) + { + return Fail("%s: Tensor Rank other than 4 is not supported", __func__); + } + + armnn::L2NormalizationDescriptor desc; + desc.m_DataLayout = armnn::DataLayout::NHWC; + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsL2NormalizationSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + +template +bool ConvertLocalResponseNormalization(const Operation& operation, + const Model& model, + ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + using OperandType = typename HalPolicy::OperandType; + + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* output = GetOutputOperand(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + if (outputInfo.GetNumDimensions() != 4u) + { + return Fail("%s: Tensor Rank other than 4 is not supported", __func__); + } + + armnn::NormalizationDescriptor descriptor; + descriptor.m_DataLayout = armnn::DataLayout::NHWC; + descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across; + descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness; + + if (!input.IsValid() || 
+ !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) || + !GetInputFloat32(operation, 2, descriptor.m_K, model, data) || + !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) || + !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data)) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + // ArmNN expects normSize to be the full size of the normalization + // window rather than the radius as in AndroidNN. + descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize); + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsNormalizationSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) + { + return false; + } + + + armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + +template +bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + armnn::ActivationDescriptor desc; + desc.m_Function = armnn::ActivationFunction::Sigmoid; + + return ConvertToActivation(operation, __func__, desc, model, data); +} + +template +bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* output = GetOutputOperand(operation, 0, model); + if (!output) + { + return Fail("%s: Could not read output 0", __func__); + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + const 
Operand* axisOperand = GetInputOperand(operation, 1, model); + if (!axisOperand) + { + return Fail("%s: Could not read input 1", __func__); + } + + std::vector axis; + if (!GetTensorInt32Values(*axisOperand, axis, model, data)) + { + return Fail("%s: Input 1 has invalid values", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + + // Convert the axis to unsigned int and remove duplicates. + unsigned int rank = inputInfo.GetNumDimensions(); + std::set uniqueAxis; + std::transform(axis.begin(), axis.end(), + std::inserter(uniqueAxis, uniqueAxis.begin()), + [rank](int i) -> unsigned int { return (i + rank) % rank; }); + + // Get the "keep dims" flag. + int32_t keepDims = 0; + if (!GetInputInt32(operation, 2, keepDims, model, data)) + { + return Fail("%s: Could not read input 2", __func__); + } + + armnn::MeanDescriptor descriptor; + descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end()); + descriptor.m_KeepDims = keepDims > 0; + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsMeanSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor); + assert(layer != nullptr); + input.Connect(layer->GetInputSlot(0)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); +} + +template +bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data); + LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data); + + if (!input0.IsValid() || !input1.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + // The FuseActivation parameter is always the input index 2 + // and it should be optional + ActivationFn activationFunction; + if 
(!GetOptionalInputActivation(operation, 2, activationFunction, model, data)) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const Operand* outputOperand = GetOutputOperand(operation, 0, model); + + if (outputOperand == nullptr) + { + return false; + } + + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); + if (IsDynamicTensor(outputInfo)) + { + return Fail("%s: Dynamic output tensors are not supported", __func__); + } + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsMultiplicationSupported, + data.m_Backends, + isSupported, + input0.GetTensorInfo(), + input1.GetTensorInfo(), + outputInfo); + if (!isSupported) + { + return false; + } + + armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer(); + armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data); + + const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo(); + const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo(); + + if (endLayer != nullptr) + { + BroadcastTensor(input0, input1, startLayer, *data.m_Network); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + } + else + { + return Fail("%s: ProcessActivation failed", __func__); + } +} + +template +bool ConvertPad(Operation& operation, const Model& model, ConversionData& data) +{ + using Operand = typename HalPolicy::Operand; + + LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data); + if (!input.IsValid()) + { + return Fail("%s: Operation has invalid inputs", __func__); + } + + const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); + unsigned int rank = inputInfo.GetNumDimensions(); + + armnn::PadDescriptor descriptor; + if (!ConvertPaddings(operation, model, data, rank, descriptor)) + { + return Fail("%s: Could not convert paddings", __func__); + } + + // Before Android Q, the pad value for 
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
+    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
+    // (QuantizationOffset - QuantizationOffset) * scale = 0.
+    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
+    {
+        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPadSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+// Converts an ANEURALNETWORKS_RESHAPE operation to an ArmNN reshape layer.
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    const Operand* inputOperand          = GetInputOperand<HalPolicy>(operation, 0, model);
+    const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
+    const Operand* outputOperand         = GetOutputOperand<HalPolicy>(operation, 0, model);
+
+    if (inputOperand == nullptr
+        || requestedShapeOperand == nullptr
+        || outputOperand == nullptr)
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (requestedShapeOperand->dimensions.size() != 1)
+    {
+        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
+                    __func__, requestedShapeOperand->dimensions.size());
+    }
+
+    std::vector<int32_t> targetDimensions;
+    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
+    {
+        return Fail("%s: Could not read values of input 1", __func__);
+    }
+
+    const Shape inputOperandShape = GetOperandShape(*inputOperand);
+
+    Shape requestedShape;
+    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
+    // function that resolves these values into a fully specified tensor shape.
+    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
+    {
+        return Fail("%s: Failed to resolve the requested shape", __func__);
+    }
+
+    const Shape outputOperandShape = GetOperandShape(*outputOperand);
+    if (!SameShape(requestedShape, outputOperandShape))
+    {
+        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
+                                                         requestedShape.dimensions.data());
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               reshapeDescriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+// Converts an ANEURALNETWORKS_SUB operation to an ArmNN subtraction layer,
+// fusing the optional activation function (input index 2) onto the output.
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
+
+    if (!input0.IsValid() ||
!input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSubtractionSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input0.GetTensorInfo(),
+                               input1.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);
+
+    // NOTE: removed the unused locals inputTensorInfo0/inputTensorInfo1 (dead code).
+    if (endLayer)
+    {
+        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
+        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    }
+
+    return Fail("%s: ProcessActivation failed", __func__);
+}
+
+// Converts an ANEURALNETWORKS_SQUEEZE operation to an ArmNN reshape layer that
+// drops the requested (or all) size-1 dimensions.
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        // BUGFIX: the Fail() result was previously discarded, so execution fell through the guard.
+        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
+
+    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    std::vector<int32_t> axis;
+    if (!axisOperand)
+    {
+        axis.assign(dimensionSequence,
+                    dimensionSequence + rank);
+    }
+    else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
+    {
+        // BUGFIX: the return value was previously ignored; a failed read silently left 'axis' empty.
+        return Fail("%s: Could not read the axis operand", __func__);
+    }
+
+    // Keep a dimension unless it was explicitly listed for squeezing and has size 1.
+    std::vector<uint32_t> outputDims;
+    for (unsigned int i = 0; i < rank; i++)
+    {
+        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
+        auto currentDimension = inputInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
+
+    armnn::TensorInfo outputInfo = inputInfo;
+    outputInfo.SetShape(outShape);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputInfo.GetShape();
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               reshapeDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+// Converts an ANEURALNETWORKS_STRIDED_SLICE operation to an ArmNN strided-slice layer.
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        // BUGFIX: the Fail() result was previously discarded, so execution fell through the guard.
+        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    const Operand* beginOperand   = GetInputOperand<HalPolicy>(operation, 1, model);
+    const Operand* endOperand     = GetInputOperand<HalPolicy>(operation, 2, model);
+    const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
+
+    // BUGFIX: these operands were previously dereferenced below without a null check.
+    if (!beginOperand || !endOperand || !stridesOperand)
+    {
+        return Fail("%s: Could not read input operands 1, 2 or 3", __func__);
+    }
+
+    std::vector<int32_t> beginValues;
+    std::vector<int32_t> endValues;
+    std::vector<int32_t> stridesValues;
+
+    // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
+    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
+    {
+        if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
+        {
+            return false;
+        }
+
+        if (operandValues.size() != rank)
+        {
+            return false;
+        }
+
+        return true;
+    };
+
+    if (!ValidateInputOperands(*beginOperand, beginValues)
+        || !ValidateInputOperands(*endOperand, endValues)
+        || !ValidateInputOperands(*stridesOperand, stridesValues))
+    {
+        return Fail("%s: Operation has invalid input operand", __func__);
+    }
+
+    // Stride cannot have value '0'
+    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
+    {
+        return Fail("%s: Stride must be non-zero value.", __func__);
+    }
+
+    armnn::StridedSliceDescriptor descriptor;
+    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
+    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
+    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
+    if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
+        !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
+        !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsStridedSliceSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+// Converts an ANEURALNETWORKS_TRANSPOSE operation to an ArmNN permute layer.
+template<typename HalPolicy,
+         typename Operation = typename HalPolicy::Operation,
+         typename Model     = typename HalPolicy::Model>
+bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+    using Operand = typename HalPolicy::Operand;
+
+    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        // BUGFIX: the Fail() result was previously discarded, so execution fell through the guard.
+        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
+
+    std::vector<int32_t> perm(rank);
+    if (!permOperand)
+    {
+        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
+        for (unsigned int i = rank; i > 0; i--)
+        {
+            perm[rank - i] = boost::numeric_cast<int> (i - 1);
+        }
+    }
+    else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
+    {
+        // BUGFIX: the return value was previously ignored; a failed read silently left 'perm' zero-filled.
+        return Fail("%s: Could not read the permutation operand", __func__);
+    }
+
+    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
+
+    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
+    if (!permutationVector.IsEqual(NHWCToArmNN)
+        && !permutationVector.IsEqual(ArmNNToNHWC)
+        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
+    {
+        return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
+    }
+
+    armnn::PermuteDescriptor permuteDesc;
+    permuteDesc.m_DimMappings = permutationVector;
+
+    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsPermuteSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               permuteDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
+}
+
+template