From 0bd4c6230974e7e446cd26104180d520b643d5bb Mon Sep 17 00:00:00 2001
From: Matthew Sloyan <matthew.sloyan@arm.com>
Date: Thu, 27 Apr 2023 11:48:26 +0100
Subject: IVGCVSW-7574 IVGCVSW-7590 IVGCVSW-7600 Implement Activation,
 FullyConnected and Prelu operators for Opaque Delegate

* Added missing headers (Control.hpp and Comparison.hpp) to opaque/CMakeLists.txt.
* Cleaned up Control.hpp headers.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I442edb9c467b515b130fbaf02879f0802006255f
---
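For reviewers: a minimal, self-contained sketch of the input-flattening
arithmetic the new FullyConnected visitor performs before validation. The
function and parameter names below are illustrative only and do not appear
in this patch:

    #include <array>
    #include <cstdint>
    #include <optional>

    // Returns {batchSize, inputSize} when the element count divides evenly
    // by the weights' inner dimension; otherwise nullopt, mirroring how the
    // delegate rejects the node.
    std::optional<std::array<uint32_t, 2>> FlattenTo2d(uint32_t numElements,
                                                       uint32_t inputSize)
    {
        if (inputSize == 0 || numElements % inputSize != 0)
        {
            return std::nullopt;
        }
        return std::array<uint32_t, 2>{ numElements / inputSize, inputSize };
    }

    // e.g. a [2, 2, 10] input (40 elements) with [16, 10] weights
    // flattens to [4, 10].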
 delegate/CMakeLists.txt                |   6 +
 delegate/opaque/CMakeLists.txt         |   5 +
 delegate/opaque/src/Activation.hpp     | 147 +++++++++++++++++
 delegate/opaque/src/Control.hpp        |  12 +-
 delegate/opaque/src/FullyConnected.hpp | 291 +++++++++++++++++++++++++++++++++
 delegate/opaque/src/Prelu.hpp          | 121 ++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp |  54 ++++++
 7 files changed, 625 insertions(+), 11 deletions(-)

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 06c317a03d..3914a24fb5 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -259,6 +259,8 @@ if(BUILD_UNIT_TESTS)
         common/src/test/DelegateTestInterpreterUtils.hpp
         opaque/src/test/ArmnnOpaqueDelegateTest.cpp
         opaque/src/test/DelegateTestInterpreter.cpp
+        test/ActivationTest.cpp
+        test/ActivationTestHelper.hpp
         test/ArgMinMaxTest.cpp
         test/ArgMinMaxTestHelper.hpp
         test/BatchSpaceTest.cpp
@@ -272,10 +274,14 @@ if(BUILD_UNIT_TESTS)
         test/Convolution2dTest.cpp
         test/ConvolutionTestHelper.hpp
         test/DepthwiseConvolution2dTest.cpp
+        test/FullyConnectedTest.cpp
+        test/FullyConnectedTestHelper.hpp
         test/GatherTest.cpp
         test/GatherTestHelper.hpp
         test/GatherNdTest.cpp
         test/GatherNdTestHelper.hpp
+        test/PreluTest.cpp
+        test/PreluTestHelper.hpp
         test/TestUtils.hpp
         test/TestUtils.cpp)

diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index ee364197e5..5a248f9c2e 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -7,12 +7,17 @@ set(armnnOpaqueDelegateObject_sources)
 list(APPEND armnnOpaqueDelegateObject_sources
         include/armnn_delegate.hpp
         include/Version.hpp
+        src/Activation.hpp
         src/ArgMinMax.hpp
         src/armnn_delegate.cpp
         src/BatchSpace.hpp
+        src/Comparison.hpp
+        src/Control.hpp
         src/Convolution.hpp
+        src/FullyConnected.hpp
         src/Gather.hpp
         src/GatherNd.hpp
+        src/Prelu.hpp
         src/Redefine.hpp
         src/SharedFunctions.cpp
         src/SharedFunctions.hpp)

diff --git a/delegate/opaque/src/Activation.hpp b/delegate/opaque/src/Activation.hpp
index e16969768e..a45bba95a9 100644
--- a/delegate/opaque/src/Activation.hpp
+++ b/delegate/opaque/src/Activation.hpp
@@ -2,3 +2,150 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
+                                        TfLiteOpaqueContext* tfLiteContext,
+                                        const armnn::TensorInfo& inputInfo,
+                                        const armnn::TensorInfo& outputInfo,
+                                        armnn::ActivationDescriptor& activationDesc)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
+                                          tfLiteContext,
+                                          IsActivationSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          armnn::BackendId(),
+                                          inputInfo,
+                                          outputInfo,
+                                          activationDesc);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
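+
+// Maps a TfLite activation builtin (RELU, RELU6, LOGISTIC, TANH, ELU,
+// HARD_SWISH) onto an armnn::ActivationDescriptor, then either validates
+// backend support or, when a network is under construction, adds the layer.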
+TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
+                                     TfLiteOpaqueContext* tfLiteContext,
+                                     TfLiteOpaqueNode* tfLiteNode,
+                                     int nodeIndex,
+                                     int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use them to get the input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use them to get the output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::ActivationDescriptor activationDesc;
+    switch (operatorCode)
+    {
+        case kTfLiteBuiltinRelu:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+            break;
+        }
+        case kTfLiteBuiltinRelu6:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+            activationDesc.m_A = 6.0f;
+            break;
+        }
+        case kTfLiteBuiltinLogistic:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+            break;
+        }
+        case kTfLiteBuiltinTanh:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::TanH;
+            activationDesc.m_A = 1.0f;
+            activationDesc.m_B = 1.0f;
+            break;
+        }
+        case kTfLiteBuiltinElu:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::Elu;
+            activationDesc.m_A = 1.0f;
+            break;
+        }
+        case kTfLiteBuiltinHardSwish:
+        {
+            activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
+            break;
+        }
+        default:
+        {
+            return kTfLiteError;
+        }
+    }
+    if (!delegateData.m_Network)
+    {
+        return ValidateActivationOperator(delegateData,
+                                          tfLiteContext,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          activationDesc);
+    }
+    armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
+    ARMNN_ASSERT(activationLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs, if there are any.
+    if (ProcessInputs(activationLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(activationLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate

diff --git a/delegate/opaque/src/Control.hpp b/delegate/opaque/src/Control.hpp
index b3d589756b..abc6b6fb0e 100644
--- a/delegate/opaque/src/Control.hpp
+++ b/delegate/opaque/src/Control.hpp
@@ -6,17 +6,7 @@
 #pragma once
 
 #include <armnn_delegate.hpp>
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
-
-#include <algorithm>
-#include <iterator>
-#include <string>
-#include <vector>
+#include <OpaqueDelegateUtils.hpp>
 
 namespace armnnOpaqueDelegate
 {

diff --git a/delegate/opaque/src/FullyConnected.hpp b/delegate/opaque/src/FullyConnected.hpp
index e16969768e..3282cab543 100644
--- a/delegate/opaque/src/FullyConnected.hpp
+++ b/delegate/opaque/src/FullyConnected.hpp
@@ -2,3 +2,294 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+#include <SharedFunctions.hpp>
+
+namespace armnnOpaqueDelegate
+{
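+
+// Validates and (when a network is being constructed) adds a FullyConnected
+// layer. Inputs with more than two dimensions are flattened to 2D via Reshape
+// layers on either side, and constant weights/bias become ConstantLayers.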
+TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData,
+                                         TfLiteOpaqueContext* tfLiteContext,
+                                         TfLiteOpaqueNode* tfLiteNode,
+                                         int nodeIndex,
+                                         int32_t operatorCode)
+{
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+    if (numInputs < 2)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d != %d) in node #%d",
+                2, numInputs, nodeIndex);
+        return kTfLiteError;
+    }
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use them to get the input tensors.
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteWeightsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteWeightsTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use them to get the output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo   = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& weightsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteWeightsTensor);
+    const armnn::TensorInfo& outputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // Check that we support the fused activation before we attempt to create a layer.
+    auto* tfLiteNodeParameters =
+            reinterpret_cast<TfLiteFullyConnectedParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
+                                                                        outputTensorInfo, activationType);
+        if (activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+    }
+
+    // The Fully Connected layer accepts only two-dimensional weights.
+    int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions());
+    if (weightsDimension != 2)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Dimension #%d for Fully Connected weights is not supported by Armnn"
+                " in operator #%d node #%d: ", weightsDimension, operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    armnn::TensorInfo biasTensorInfo;
+    const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;
+
+    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
+    if (biasEnabled)
+    {
+        // Use input indices to get the bias tensor.
+        tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
+        {
+            return kTfLiteError;
+        }
+        biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
+    }
+    else
+    {
+        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
+    }
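+
+    // TfLite permits inputs with more than two dimensions here, but armnn's
+    // FullyConnected expects 2D [batchSize, inputSize] data, so work out the
+    // flattened shapes before validating support.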
+    armnn::TensorInfo reshapedTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    if (inputTensorInfo.GetNumDimensions() > 2)
+    {
+        // Calculate the reshape to flatten to 2D [batch_size, input_size].
+        std::vector<unsigned int> reshapedDimensions(2);
+        reshapedDimensions[1] = weightsTensorInfo.GetShape()[1];
+        reshapedDimensions[0] = inputTensorInfo.GetNumElements() / reshapedDimensions[1];
+
+        if (inputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Failed to deduce input tensor shape from filter size #%d #%d node #%d: ",
+                    reshapedDimensions[1], operatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        reshapedTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
+    }
+    armnn::TensorInfo reshapedOutputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor);
+
+    if (outputTensorInfo.GetNumDimensions() > 2)
+    {
+        // Calculate the reshape to flatten to 2D [batch_size, output_size].
+        std::vector<unsigned int> reshapedDimensions(2);
+        reshapedDimensions[1] = weightsTensorInfo.GetShape()[0];
+        reshapedDimensions[0] = outputTensorInfo.GetNumElements() / reshapedDimensions[1];
+
+        if (outputTensorInfo.GetNumElements() % reshapedDimensions[1] != 0)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Failed to deduce output tensor shape from filter size #%d #%d node #%d: ",
+                    reshapedDimensions[1], operatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+        reshapedOutputTensorInfo.SetShape(armnn::TensorShape{ 2, reshapedDimensions.data() });
+    }
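+
+    // TfLite stores FullyConnected weights as [outputSize, inputSize], so
+    // tell armnn to treat the weight matrix as transposed.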
+    armnn::FullyConnectedDescriptor descriptor;
+    descriptor.m_TransposeWeightMatrix = true;
+    descriptor.m_BiasEnabled           = biasEnabled;
+    descriptor.m_ConstantWeights       = weightsTensorInfo.IsConstant();
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("FULLY_CONNECTED",
+                                          tfLiteContext,
+                                          IsFullyConnectedSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          reshapedTensorInfo,
+                                          outputTensorInfo,
+                                          weightsTensorInfo,
+                                          biasTensorInfo,
+                                          descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(reshapedOutputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFullyConnectedLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    // Add a constant layer for weights and biases if inputs are constant.
+    if (weightsTensorInfo.IsConstant())
+    {
+        auto weightsTensor = CreateConstTensor(tfLiteWeightsTensor, weightsTensorInfo);
+
+        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(weightsTensor);
+
+        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
+        weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsTensorInfo);
+    }
+
+    if (biasEnabled)
+    {
+        if (biasTensorInfo.IsConstant())
+        {
+            auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
+
+            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
+            ARMNN_ASSERT(biasLayer != nullptr);
+
+            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
+            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
+        }
+    }
+
+    // The data input can also be constant, so we must check that it is also allocated to an input slot.
+    if (inputTensorInfo.IsConstant())
+    {
+        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);
+
+        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
+        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
+        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    }
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    armnn::IConnectableLayer* reshapeLayer = nullptr;
+    if (inputTensorInfo.GetNumDimensions() > 2)
+    {
+        // Add a reshape to flatten the input to 2D [batch_size, input_size].
+        armnn::ReshapeDescriptor reshapeDescriptor;
+        reshapeDescriptor.m_TargetShape = reshapedTensorInfo.GetShape();
+        reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
+        ARMNN_ASSERT(reshapeLayer != nullptr);
+
+        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedTensorInfo);
+
+        // Connect
+        delegateData.m_OutputSlotForNode[inputTensors[0]]->Connect(reshapeLayer->GetInputSlot(0));
+        reshapeLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+
+        if (!descriptor.m_ConstantWeights)
+        {
+            delegateData.m_OutputSlotForNode[inputTensors[1]]->Connect(layer->GetInputSlot(1));
+        }
+
+        if (biasEnabled && !biasTensorInfo.IsConstant())
+        {
+            delegateData.m_OutputSlotForNode[inputTensors[2]]->Connect(layer->GetInputSlot(2));
+        }
+        delegateData.m_OutputSlotForNode[outputTensors[0]] = &outputSlot;
+    }
+
+    if (reshapeLayer == nullptr)
+    {
+        if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+    }
+
+    if (outputTensorInfo.GetNumDimensions() > 2)
+    {
+        layer = AddReshapeLayer(tfLiteContext,
+                                tfLiteNode,
+                                layer,
+                                reshapedOutputTensorInfo,
+                                outputTensorInfo,
+                                delegateData);
+        if (!layer)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Failed to add reshape for FullyConnected #%d node #%d: ",
+                    operatorCode,
+                    nodeIndex);
+            return kTfLiteError;
+        }
+    }
+
+    if (!tfLiteNodeParameters)
+    {
+        // No Activation
+        return kTfLiteOk;
+    }
+
+    // Check and create the fused Activation.
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate

diff --git a/delegate/opaque/src/Prelu.hpp b/delegate/opaque/src/Prelu.hpp
index e16969768e..1a4037eb35 100644
--- a/delegate/opaque/src/Prelu.hpp
+++ b/delegate/opaque/src/Prelu.hpp
@@ -2,3 +2,124 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
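+
+// PReLU computes f(x) = x for x >= 0 and f(x) = alpha * x for x < 0, where
+// alpha is a learned tensor that is broadcast against the input.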
+TfLiteStatus ValidatePreluOperator(DelegateData& delegateData,
+                                   TfLiteOpaqueContext* tfLiteContext,
+                                   const armnn::TensorInfo& inputInfo,
+                                   const armnn::TensorInfo& alphaInfo,
+                                   const armnn::TensorInfo& outputInfo)
+{
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("PRELU",
+                                          tfLiteContext,
+                                          IsPreluSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          armnn::BackendId(),
+                                          inputInfo,
+                                          alphaInfo,
+                                          outputInfo);
+    };
+
+    validateFunc(outputInfo, isSupported);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitPreluOperator(DelegateData& delegateData,
+                                TfLiteOpaqueContext* tfLiteContext,
+                                TfLiteOpaqueNode* tfLiteNode,
+                                int nodeIndex,
+                                int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use them to get the input tensors.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteAlphaTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteAlphaTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use them to get the output tensor.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& alphaTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAlphaTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    if (!delegateData.m_Network)
+    {
+        return ValidatePreluOperator(delegateData,
+                                     tfLiteContext,
+                                     inputTensorInfo,
+                                     alphaTensorInfo,
+                                     outputTensorInfo);
+    }
+
+    armnn::IConnectableLayer* preluLayer = delegateData.m_Network->AddPreluLayer();
+    ARMNN_ASSERT(preluLayer != nullptr);
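+
+    // A constant alpha is lifted into its own ConstantLayer below; a
+    // non-constant alpha arrives through the node's second input and is
+    // connected by Connect() at the end.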
+    bool isConstantAlpha = IsConstantTensor(tfLiteAlphaTensor);
+
+    // Add a constant layer for the constant alpha tensor.
+    if (isConstantAlpha)
+    {
+        auto constAlphaTensor = armnn::ConstTensor(alphaTensorInfo, TfLiteOpaqueTensorData(tfLiteAlphaTensor));
+
+        armnn::IConnectableLayer* constLayer = delegateData.m_Network->AddConstantLayer(constAlphaTensor);
+        ARMNN_ASSERT(constLayer != nullptr);
+
+        constLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
+        constLayer->GetOutputSlot(0).Connect(preluLayer->GetInputSlot(1));
+    }
+
+    armnn::IOutputSlot& outputSlot = preluLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Connect
+    return Connect(preluLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate

diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 38a67e7c46..de88c3e242 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -664,12 +664,24 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinDepthwiseConv2d);
+        case kTfLiteBuiltinElu:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinElu);
         case kTfLiteBuiltinEqual:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinEqual);
+        case kTfLiteBuiltinFullyConnected:
+            return VisitFullyConnectedOperator(delegateData,
+                                               tfLiteContext,
+                                               tfLiteNode,
+                                               nodeIndex,
+                                               kTfLiteBuiltinFullyConnected);
         case kTfLiteBuiltinGather:
             return VisitGatherOperator(delegateData,
                                        tfLiteContext,
@@ -694,6 +706,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinGreaterEqual);
+        case kTfLiteBuiltinHardSwish:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinHardSwish);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
@@ -706,6 +724,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinLessEqual);
+        case kTfLiteBuiltinLogistic:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinLogistic);
         case kTfLiteBuiltinMean:
             return VisitControlOperator(delegateData,
                                         tfLiteContext,
@@ -718,12 +742,42 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinNotEqual);
+        case kTfLiteBuiltinPrelu:
+            return VisitPreluOperator(delegateData,
+                                      tfLiteContext,
+                                      tfLiteNode,
+                                      nodeIndex,
+                                      kTfLiteBuiltinPrelu);
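+        // The remaining activation builtins are all routed through
+        // VisitActivationOperator; the builtin code selects the
+        // armnn::ActivationFunction inside the visitor.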
+        case kTfLiteBuiltinRelu:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinRelu);
+        case kTfLiteBuiltinReluN1To1:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinReluN1To1);
+        case kTfLiteBuiltinRelu6:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinRelu6);
         case kTfLiteBuiltinSpaceToBatchNd:
             return VisitSpaceToBatchNdOperator(delegateData,
                                                tfLiteContext,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinSpaceToBatchNd);
+        case kTfLiteBuiltinTanh:
+            return VisitActivationOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinTanh);
         default:
             return kTfLiteError;
     }
-- 
cgit v1.2.1