From 48ec813697c7d431a9159e5759fb31a41739fb10 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 27 Apr 2023 17:04:47 +0100
Subject: IVGCVSW-7608 IVGCVSW-7594 IVGCVSW-7598 IVGCVSW-7599 Implement Floor,
 Lstm, Pooling2d and Pooling3d operators for Opaque Delegate

Signed-off-by: Matthew Sloyan
Change-Id: Ic9af1c50589285ab359661699d32a889cd267cd9
---
 delegate/CMakeLists.txt                |   8 +
 delegate/opaque/CMakeLists.txt         |   3 +
 delegate/opaque/src/Lstm.hpp           | 289 ++++++++++++++++++++++++++
 delegate/opaque/src/Pooling.hpp        | 365 +++++++++++++++++++++++++++++++++
 delegate/opaque/src/Round.hpp          |  87 ++++++++
 delegate/opaque/src/armnn_delegate.cpp |  54 +++++
 6 files changed, 806 insertions(+)

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 4600f42301..8045ee8e1d 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -285,12 +285,20 @@ if(BUILD_UNIT_TESTS)
         test/GatherTestHelper.hpp
         test/GatherNdTest.cpp
         test/GatherNdTestHelper.hpp
+        test/LstmTest.cpp
+        test/LstmTestHelper.hpp
         test/LogicalTest.cpp
         test/LogicalTestHelper.hpp
         test/NormalizationTest.cpp
         test/NormalizationTestHelper.hpp
+        test/Pooling2dTest.cpp
+        test/Pooling2dTestHelper.hpp
+        test/Pooling3dTest.cpp
+        test/Pooling3dTestHelper.hpp
         test/PreluTest.cpp
         test/PreluTestHelper.hpp
+        test/RoundTest.cpp
+        test/RoundTestHelper.hpp
         test/ShapeTest.cpp
         test/ShapeTestHelper.hpp
         test/TestUtils.hpp
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index dd16a70048..f1c8851396 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -20,9 +20,12 @@ list(APPEND armnnOpaqueDelegateObject_sources
         src/Gather.hpp
         src/GatherNd.hpp
         src/LogicalBinary.hpp
+        src/Lstm.hpp
         src/Normalization.hpp
+        src/Pooling.hpp
         src/Prelu.hpp
         src/Redefine.hpp
+        src/Round.hpp
         src/Shape.hpp
         src/SharedFunctions.cpp
         src/SharedFunctions.hpp)
diff --git a/delegate/opaque/src/Lstm.hpp b/delegate/opaque/src/Lstm.hpp
index e16969768e..b896b462d3 100644
--- a/delegate/opaque/src/Lstm.hpp
+++ b/delegate/opaque/src/Lstm.hpp
@@ -2,3 +2,292 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitLstmOperator(DelegateData& delegateData,
+                               TfLiteOpaqueContext* tfLiteContext,
+                               TfLiteOpaqueNode* tfLiteNode,
+                               int nodeIndex,
+                               int32_t operatorCode)
+{
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+    if (numInputs < 2)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not met (%d provided) in node #%d",
+                2, numInputs, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Gather input indices and use to get input tensor.
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Set the params structure for the AddLstmLayer call
+    armnn::LstmInputParams params;
+
+    if (IsOptionalOperandPresent(tfLiteNode, 1))
+    {
+        params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 1);
+    }
+
+    params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 2);
+    params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 3);
+    params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 4);
+
+    // Recurrent weight tensors of size {n_cell, n_output}
+    if (IsOptionalOperandPresent(tfLiteNode, 5))
+    {
+        params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 5);
+    }
+
+    params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 6);
+    params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 7);
+    params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 8);
+
+    // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
+    if (IsOptionalOperandPresent(tfLiteNode, 9))
+    {
+        params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 9);
+    }
+
+    if (IsOptionalOperandPresent(tfLiteNode, 10))
+    {
+        params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 10);
+    }
+
+    if (IsOptionalOperandPresent(tfLiteNode, 11))
+    {
+        params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 11);
+    }
+
+    // Gates bias tensors of size {n_cell}
+    if (IsOptionalOperandPresent(tfLiteNode, 12))
+    {
+        params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 12);
+    }
+
+    params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 13);
+    params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 14);
+    params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 15);
+
+    // Projection weight tensor of size {n_output, n_cell}
+    if (IsOptionalOperandPresent(tfLiteNode, 16))
+    {
+        params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 16);
+    }
+    // Projection bias tensor of size {n_output}
+    if (IsOptionalOperandPresent(tfLiteNode, 17))
+    {
+        params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 17);
+    }
+
+    // These state tensors are defined as variable tensors, and will be modified by this op.
+    const TfLiteOpaqueTensor* tfLiteOutputStateIn = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[18]);
+    if (!IsValid(tfLiteContext, tfLiteOutputStateIn, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* cellStateIn = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[19]);
+    if (!IsValid(tfLiteContext, cellStateIn, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputStateIn);
+    armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteOpaqueTensor(cellStateIn);
+
+    // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
+    if (IsOptionalOperandPresent(tfLiteNode, 20))
+    {
+        params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 20);
+    }
+
+    if (IsOptionalOperandPresent(tfLiteNode, 21))
+    {
+        params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 21);
+    }
+
+    if (IsOptionalOperandPresent(tfLiteNode, 22))
+    {
+        params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 22);
+    }
+
+    if (IsOptionalOperandPresent(tfLiteNode, 23))
+    {
+        params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 23);
+    }
+
+    const auto nodeParams = reinterpret_cast<TfLiteLSTMParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+    // Set the layer descriptor
+    armnn::LstmDescriptor desc;
+    desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
+    desc.m_ClippingThresCell = nodeParams->cell_clip;
+    desc.m_ClippingThresProj = nodeParams->proj_clip;
+    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
+                          || params.m_RecurrentToInputWeights == nullptr
+                          || params.m_InputGateBias == nullptr);
+    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
+    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
+                               || params.m_ForgetLayerNormWeights != nullptr
+                               || params.m_CellLayerNormWeights != nullptr
+                               || params.m_OutputLayerNormWeights != nullptr);
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    unsigned int batchSize = inputTensorInfo.GetShape()[0];
+    unsigned int outputSize = outputTensorInfo.GetShape()[1];
+    unsigned int numUnits = cellStateInInfo.GetShape()[1];
+
+    armnn::DataType dataType = inputTensorInfo.GetDataType();
+    float qScale = inputTensorInfo.GetQuantizationScale();
+    float qOffset = inputTensorInfo.GetQuantizationOffset();
+
+    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
+    if (!desc.m_CifgEnabled)
+    {
+        scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
+    }
+    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, dataType, qScale, qOffset);
+    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);
+
+    armnn::LstmInputParamsInfo paramsInfo;
+    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
+    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
+    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
+    paramsInfo.m_RecurrentToForgetWeights =
+            &(params.m_RecurrentToForgetWeights->GetInfo());
+    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
+    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
+    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
+    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
+
+    if (!desc.m_CifgEnabled)
+    {
+        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+        if (params.m_CellToInputWeights != nullptr)
+        {
+            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+        }
+        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
+    }
+
+    if (desc.m_ProjectionEnabled)
+    {
+        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
+        if (params.m_ProjectionBias != nullptr)
+        {
+            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
+        }
+    }
+
+    if (desc.m_PeepholeEnabled)
+    {
+        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+    }
+
+    if (desc.m_LayerNormEnabled)
+    {
+        if (!desc.m_CifgEnabled)
+        {
+            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
+        }
+        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
+        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
+        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
+    }
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("LSTM",
+                                          tfLiteContext,
+                                          IsLstmSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputStateInInfo,
+                                          cellStateInInfo,
+                                          scratchBufferTensorInfo,
+                                          outputStateOutTensorInfo,
+                                          cellStateOutTensorInfo,
+                                          outputInfo,
+                                          desc,
+                                          paramsInfo);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddLstmLayer(desc, params);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    layer->GetOutputSlot(0).SetTensorInfo(scratchBufferTensorInfo);
+    layer->GetOutputSlot(1).SetTensorInfo(outputStateOutTensorInfo);
+    layer->GetOutputSlot(2).SetTensorInfo(cellStateOutTensorInfo);
+    layer->GetOutputSlot(3).SetTensorInfo(outputTensorInfo);
+
+    // Connect the inputs
+    // input_layer
+    delegateData.m_OutputSlotForNode[inputTensors[0]]->Connect(layer->GetInputSlot(0));
+    // outputStateIn
+    delegateData.m_OutputSlotForNode[inputTensors[18]]->Connect(layer->GetInputSlot(1));
+    // cellStateIn
+    delegateData.m_OutputSlotForNode[inputTensors[19]]->Connect(layer->GetInputSlot(2));
+
+    // In the test model there is only one output tensor; map it to the layer's outputStateOut slot (1).
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(1);
+    delegateData.m_OutputSlotForNode[static_cast<unsigned long>(outputTensors[0])] = &outputSlot;
+
+    return kTfLiteOk;
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Pooling.hpp b/delegate/opaque/src/Pooling.hpp
index e16969768e..45a10f3833 100644
--- a/delegate/opaque/src/Pooling.hpp
+++ b/delegate/opaque/src/Pooling.hpp
@@ -2,3 +2,368 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+#include <DelegateUtils.hpp>
+
+#include <flatbuffers/flexbuffers.h>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
+                                    TfLiteOpaqueContext* tfLiteContext,
+                                    TfLiteOpaqueNode* tfLiteNode,
+                                    int nodeIndex,
+                                    int32_t tfLitePoolingOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensors.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLitePoolingOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePoolingOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+    if (tfLiteNodeParameters)
+    {
+        activationType = tfLiteNodeParameters->activation;
+        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+                                                                        tfLiteContext,
+                                                                        outputTensorInfo,
+                                                                        outputTensorInfo,
+                                                                        activationType);
+        if (activationStatus != kTfLiteOk)
+        {
+            return kTfLiteError;
+        }
+    }
+
+    armnn::PoolingAlgorithm poolingAlgorithm;
+    switch (tfLitePoolingOperatorCode)
+    {
+        case kTfLiteBuiltinAveragePool2d:
+            poolingAlgorithm = armnn::PoolingAlgorithm::Average;
+            break;
+        case kTfLiteBuiltinL2Pool2d:
+            poolingAlgorithm = armnn::PoolingAlgorithm::L2;
+            break;
+        case kTfLiteBuiltinMaxPool2d:
+            poolingAlgorithm = armnn::PoolingAlgorithm::Max;
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = poolingAlgorithm;
+
+    descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
+    descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
+    descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
+    descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+    unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
+                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
+    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
+                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("POOLING_2D",
+                                          tfLiteContext,
+                                          IsPooling2dSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+    ARMNN_ASSERT(poolingLayer != nullptr);
+    poolingLayer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Check and create activation
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+}
+
+TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
+                                    TfLiteOpaqueContext* tfLiteContext,
+                                    TfLiteOpaqueNode* tfLiteNode,
+                                    int nodeIndex,
+                                    std::string customOperatorName)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensors.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, kTfLiteBuiltinCustom, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, kTfLiteBuiltinCustom, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Set the input and output info
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // Custom operators are identified by the name string associated with the operator. Use this to determine
+    // which pooling algorithm to create the Arm NN operator with. L2 Pooling3D is unsupported in TfLite.
+    armnn::PoolingAlgorithm poolingAlgorithm;
+    if (customOperatorName == "MaxPool3D")
+    {
+        poolingAlgorithm = armnn::PoolingAlgorithm::Max;
+    }
+    else if (customOperatorName == "AveragePool3D")
+    {
+        poolingAlgorithm = armnn::PoolingAlgorithm::Average;
+    }
+    else
+    {
+        return kTfLiteError;
+    }
+    // Create the Arm NN Pooling3d descriptor and set the algorithm parsed above.
+    armnn::Pooling3dDescriptor descriptor;
+    descriptor.m_PoolType = poolingAlgorithm;
+
+    // custom_initial_data and custom_initial_data_size are void* variables defined in the TfLite registration
+    // and are used to access the custom option buffer for the operator.
+    const void* customData = nullptr;
+    int customDataSize = 0;
+    if (TfLiteOpaqueNodeGetCustomInitialData(tfLiteNode, &customData, &customDataSize) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to get custom initial data from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Reinterpret the void* to a byte buffer to access the options data in the flexbuffers map.
+    const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(customData),
+                                                     customDataSize).AsMap();
+    // poolDims is a vector of [ 1, Depth, Height, Width, 1 ]
+    const auto poolDims = m["ksize"].AsTypedVector();
+    descriptor.m_PoolWidth = poolDims[3].AsInt32();
+    descriptor.m_PoolHeight = poolDims[2].AsInt32();
+    descriptor.m_PoolDepth = poolDims[1].AsInt32();
+
+    // strideDims is a vector of [ 1, Z, Y, X, 1 ]
+    const auto strideDims = m["strides"].AsTypedVector();
+    descriptor.m_StrideX = strideDims[3].AsInt32();
+    descriptor.m_StrideY = strideDims[2].AsInt32();
+    descriptor.m_StrideZ = strideDims[1].AsInt32();
+    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
+
+    unsigned int inputDepth = inputTensorInfo.GetShape()[1];
+    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
+    unsigned int inputWidth = inputTensorInfo.GetShape()[3];
+
+    // CalcPadding expects a TfLitePadding type. Parse the flexbuffers map to extract the padding string
+    // and create the TfLitePadding from it.
+    std::string paddingStr = m["padding"].AsString().str();
+    TfLitePadding padding;
+    if (paddingStr == "VALID")
+    {
+        padding = kTfLitePaddingValid;
+    }
+    else if (paddingStr == "SAME")
+    {
+        padding = kTfLitePaddingSame;
+    }
+    else
+    {
+        padding = kTfLitePaddingUnknown;
+    }
+    // Calculate the padding for each pooling dimension separately.
+    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
+                descriptor.m_PadTop, descriptor.m_PadBottom, padding);
+    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
+                descriptor.m_PadLeft, descriptor.m_PadRight, padding);
+    CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
+                descriptor.m_PadFront, descriptor.m_PadBack, padding);
+
+    // Check the activation by parsing the string from the flexbuffers map.
+    std::string activationTypeStr = m["activation"].AsString().str();
+    TfLiteFusedActivation activationType = kTfLiteActNone;
+
+    if (activationTypeStr == "kTfLiteActRelu")
+    {
+        activationType = kTfLiteActRelu;
+    }
+    else if (activationTypeStr == "kTfLiteActReluN1To1")
+    {
+        activationType = kTfLiteActReluN1To1;
+    }
+    else if (activationTypeStr == "kTfLiteActRelu6")
+    {
+        activationType = kTfLiteActRelu6;
+    }
+    else if (activationTypeStr == "kTfLiteActTanh")
+    {
+        activationType = kTfLiteActTanh;
+    }
+    else if (activationTypeStr == "kTfLiteActSignBit")
+    {
+        activationType = kTfLiteActSignBit;
+    }
+    else if (activationTypeStr == "kTfLiteActSigmoid")
+    {
+        activationType = kTfLiteActSigmoid;
+    }
+    else
+    {
+        activationType = kTfLiteActNone;
+    }
+
+    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
+                                                                    tfLiteContext,
+                                                                    outputTensorInfo,
+                                                                    outputTensorInfo,
+                                                                    activationType);
+    if (activationStatus != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Validate the output info.
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("POOLING_3D",
+                                          tfLiteContext,
+                                          IsPooling3dSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Create the Layer
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
+    ARMNN_ASSERT(poolingLayer != nullptr);
+    poolingLayer->SetBackendId(setBackend);
+
+    // Create and set output slots
+    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/Round.hpp b/delegate/opaque/src/Round.hpp
index e16969768e..c64c210301 100644
--- a/delegate/opaque/src/Round.hpp
+++ b/delegate/opaque/src/Round.hpp
@@ -2,3 +2,90 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+#include "SharedFunctions.hpp"
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitFloorOperator(DelegateData& delegateData,
+                                TfLiteOpaqueContext* tfLiteContext,
+                                TfLiteOpaqueNode* tfLiteNode,
+                                int nodeIndex,
+                                int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Use input indices to get input tensors.
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    // NOTE: outputTensorInfo is the only information required when adding the Floor layer, so the other
+    // lookups could be moved inside the !delegateData.m_Network block below for efficiency.
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to confirm
+    // support for the operator. If it is supported, VisitFloorOperator will be called again to add the
+    // layer to the network, as seen below.
+    if (!delegateData.m_Network)
+    {
+        return ValidateFloorOperator(delegateData, tfLiteContext, inputTensorInfo, outputTensorInfo);
+    }
+
+    // Add a Floor layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddFloorLayer();
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 3b647f3531..ead577f806 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -641,6 +641,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinArgMin);
+        case kTfLiteBuiltinAveragePool2d:
+            return VisitPooling2dOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinAveragePool2d);
         case kTfLiteBuiltinBatchMatmul:
             return VisitBatchMatMulOperator(delegateData,
                                             tfLiteContext,
@@ -684,6 +690,30 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinConv3d);
+        case kTfLiteBuiltinCustom:
+        {
+            // Custom operators are identified by their name rather than by a builtin code.
+            // Parse the custom_name param in the registration to dispatch to the correct visitor function.
+            std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
+            if (customOperatorName == "AveragePool3D")
+            {
+                return VisitPooling3dOperator(delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode,
+                                              nodeIndex,
+                                              customOperatorName);
+            }
+            else if (customOperatorName == "MaxPool3D")
+            {
+                return VisitPooling3dOperator(delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode,
+                                              nodeIndex,
+                                              customOperatorName);
+            }
+            // Invalid or unsupported custom operator
+            return kTfLiteError;
+        }
         case kTfLiteBuiltinDepthwiseConv2d:
             return VisitConvolutionOperator(delegateData,
                                             tfLiteContext,
@@ -710,6 +740,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                             nodeIndex,
                                             kTfLiteBuiltinExp,
                                             armnn::UnaryOperation::Exp);
+        case kTfLiteBuiltinFloor:
+            return VisitFloorOperator(delegateData,
+                                      tfLiteContext,
+                                      tfLiteNode,
+                                      nodeIndex,
+                                      kTfLiteBuiltinFloor);
         case kTfLiteBuiltinFullyConnected:
             return VisitFullyConnectedOperator(delegateData,
                                                tfLiteContext,
@@ -754,6 +790,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinL2Normalization);
+        case kTfLiteBuiltinL2Pool2d:
+            return VisitPooling2dOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinL2Pool2d);
         case kTfLiteBuiltinLess:
             return VisitComparisonOperator(delegateData,
                                            tfLiteContext,
@@ -808,6 +850,18 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                            nodeIndex,
                                            kTfLiteBuiltinLogicalOr,
                                            armnn::LogicalBinaryOperation::LogicalOr);
+        case kTfLiteBuiltinLstm:
+            return VisitLstmOperator(delegateData,
+                                     tfLiteContext,
+                                     tfLiteNode,
+                                     nodeIndex,
+                                     kTfLiteBuiltinLstm);
+        case kTfLiteBuiltinMaxPool2d:
+            return VisitPooling2dOperator(delegateData,
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinMaxPool2d);
         case kTfLiteBuiltinMean:
             return VisitControlOperator(delegateData,
                                         tfLiteContext,
--
cgit v1.2.1
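
Reviewer note (illustrative, not part of the patch): VisitPooling3dOperator above reads its parameters from a flexbuffers map with the keys "ksize", "strides", "padding" and "activation", with kernel and stride vectors laid out as [ 1, Depth, Height, Width, 1 ]. The sketch below shows one way a test or model producer might build such a custom-options buffer. The helper name BuildPool3dCustomOptions is hypothetical, and the exact flexbuffers::Builder calls are assumptions based on the flatbuffers/flexbuffers C++ API rather than anything defined by this patch.

    // Minimal sketch of building the custom options consumed by VisitPooling3dOperator.
    #include <flatbuffers/flexbuffers.h>

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> BuildPool3dCustomOptions()
    {
        flexbuffers::Builder fbb;
        fbb.Map([&]() {
            // Kernel and stride vectors follow the [ 1, Depth, Height, Width, 1 ] ordering parsed above.
            fbb.TypedVector("ksize", [&]() {
                fbb.Int(1); fbb.Int(2); fbb.Int(2); fbb.Int(2); fbb.Int(1);
            });
            fbb.TypedVector("strides", [&]() {
                fbb.Int(1); fbb.Int(1); fbb.Int(1); fbb.Int(1); fbb.Int(1);
            });
            fbb.String("padding", "VALID");             // or "SAME"
            fbb.String("activation", "kTfLiteActNone"); // matched against the strings checked in the visitor
        });
        fbb.Finish();
        return fbb.GetBuffer();
    }

The returned byte buffer is what TfLiteOpaqueNodeGetCustomInitialData would hand back to the delegate for a "MaxPool3D" or "AveragePool3D" custom node.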