From ecebb0f321d996a276bfe79efd9c8bef25834a92 Mon Sep 17 00:00:00 2001
From: Teresa Charlin <teresa.charlinreyes@arm.com>
Date: Thu, 27 Apr 2023 21:37:56 +0100
Subject: IVGCVSW-7596 IVGCVSW-7619 IVGCVSW-7597 Pack, Unpack and Pad for opaque delegate

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I25415793497f0ee08d880539e265b133875a20f7
---
 delegate/CMakeLists.txt                |   8 +-
 delegate/opaque/CMakeLists.txt         |   5 +-
 delegate/opaque/src/Pack.hpp           | 141 ++++++++++++++++++++
 delegate/opaque/src/Pad.hpp            | 191 +++++++++++++++++++++++++++
 delegate/opaque/src/Unpack.hpp         | 227 +++++++++++++++++++++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp |  24 ++++
 6 files changed, 594 insertions(+), 2 deletions(-)

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index a77e630da0..081b3f32a0 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -292,6 +292,10 @@ if(BUILD_UNIT_TESTS)
         test/LogicalTestHelper.hpp
         test/NormalizationTest.cpp
         test/NormalizationTestHelper.hpp
+        test/PackTest.cpp
+        test/PackTestHelper.hpp
+        test/PadTest.cpp
+        test/PadTestHelper.hpp
         test/Pooling2dTest.cpp
         test/Pooling2dTestHelper.hpp
         test/Pooling3dTest.cpp
@@ -304,7 +308,9 @@ if(BUILD_UNIT_TESTS)
         test/ShapeTestHelper.hpp
         test/TestUtils.hpp
         test/TestUtils.cpp
-        test/TransposeConvolution2dTest.cpp)
+        test/TransposeConvolution2dTest.cpp
+        test/UnpackTest.cpp
+        test/UnpackTestHelper.hpp)

     # Until all operators are supported, we have to add tests one by one above to opaqueDelegate_unittest_sources.
     # After that we can add commonDelegate_unittest_sources to the add_executable below.

diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 716bac6b53..c5eaa20872 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -23,13 +23,16 @@ list(APPEND armnnOpaqueDelegateObject_sources
         src/LogicalBinary.hpp
         src/Lstm.hpp
         src/Normalization.hpp
+        src/Pad.hpp
         src/Pooling.hpp
+        src/Pack.hpp
         src/Prelu.hpp
         src/Redefine.hpp
         src/Round.hpp
         src/Shape.hpp
         src/SharedFunctions.cpp
-        src/SharedFunctions.hpp)
+        src/SharedFunctions.hpp
+        src/Unpack.hpp)

 add_library(armnnOpaqueDelegateObject OBJECT ${armnnOpaqueDelegateObject_sources})

diff --git a/delegate/opaque/src/Pack.hpp b/delegate/opaque/src/Pack.hpp
index e16969768e..c3ea7da7f7 100644
--- a/delegate/opaque/src/Pack.hpp
+++ b/delegate/opaque/src/Pack.hpp
@@ -2,3 +2,144 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitPackOperator(DelegateData& delegateData,
+                               TfLiteOpaqueContext* tfLiteContext,
+                               TfLiteOpaqueNode* tfLiteNode,
+                               int nodeIndex,
+                               int32_t tfLitePackOperatorCode)
+{
+    // Check Inputs
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+    if (numInputs < 1)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Must have at least %d input, got %d, in node #%d",
+                1,
+                numInputs,
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Gather input indices and use to get input tensors.
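+    // TfLiteOpaqueNodeInputs hands back a pointer to the node's own index
+    // array and rewrites numInputs with the actual count, so nothing here
+    // needs to be allocated or freed by the caller.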
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Validate all inputs and get TensorInfo
+    std::vector<armnn::TensorInfo> inputTensorInfos;
+    for (int i = 0; i < numInputs; ++i)
+    {
+        const TfLiteOpaqueTensor* inputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[i]);
+        if (!IsValid(tfLiteContext, inputTensor, tfLitePackOperatorCode, nodeIndex))
+        {
+            return kTfLiteError;
+        }
+
+        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(inputTensor);
+        inputTensorInfos.emplace_back(inputTensorInfo);
+    }
+
+    // Convert inputTensorInfos to const armnn::TensorInfo* type for FORWARD_LAYER_OPAQUE_SUPPORT_FUNC.
+    std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
+    std::transform(inputTensorInfos.begin(),
+                   inputTensorInfos.end(),
+                   std::back_inserter(inputConstTensorInfos),
+                   [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
+
+    // Check outputs
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather output indices and use to get output tensor.
+    const int* outputTensors;
+    int numOutputs;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Validate the output and get TensorInfo
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePackOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::StackDescriptor desc;
+    desc.m_NumInputs = static_cast<uint32_t>(numInputs);
+
+    // Get axis from TfLite parameters
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLitePackParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+    auto axis = tfLiteNodeParameters->axis;
+    desc.m_Axis = NonNegative(axis, nodeIndex);
+
+    // Use the tensor shape of the first input as the "correct" input shape in the descriptor
+    desc.m_InputShape = inputTensorInfos[0].GetShape();
+
+    // Check if supported
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("STACK",
+                                          tfLiteContext,
+                                          IsStackSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputConstTensorInfos,
+                                          outputTensorInfo,
+                                          desc);
+    };
+
+    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify
+    // support for the operator. If supported, VisitPackOperator will be called again to add the layer
+    // to the network, as seen below.
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // The TfLite Pack operator is equivalent to the ArmNN Stack operator
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    // Connect the Constant Inputs
+    auto inputsTensorsProcess = ProcessInputs(layer,
+                                              delegateData,
+                                              tfLiteContext,
+                                              tfLiteNode);
+    if (inputsTensorsProcess == kTfLiteError)
+    {
+        return inputsTensorsProcess;
+    }
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/Pad.hpp b/delegate/opaque/src/Pad.hpp
index e16969768e..112e7bb152 100644
--- a/delegate/opaque/src/Pad.hpp
+++ b/delegate/opaque/src/Pad.hpp
@@ -2,3 +2,194 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitPadOperator(DelegateData& delegateData,
+                              TfLiteOpaqueContext* tfLiteContext,
+                              TfLiteOpaqueNode* tfLiteNode,
+                              int nodeIndex,
+                              int32_t tfLitePadOperatorCode)
+{
+    switch(tfLitePadOperatorCode)
+    {
+        case kTfLiteBuiltinMirrorPad:
+        case kTfLiteBuiltinPad:
+            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+            break;
+        case kTfLiteBuiltinPadv2:
+            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    // Inputs
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLitePadOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLitePaddingTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLitePaddingTensor, tfLitePadOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Output
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePadOperatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePaddingTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // Get the padding data from the input tensor
+    auto* paddingData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLitePaddingTensor));
+
+    size_t step = 2;
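+    // The TfLite paddings tensor is laid out as [rank, 2]: one (before, after)
+    // pair per input dimension, which is why the loop below strides by two.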
+    armnn::PadDescriptor descriptor;
+    for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
+    {
+        descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
+    }
+
+    if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
+    {
+        descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
+    }
+    else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
+    {
+        const TfLiteOpaqueTensor* tfLitepaddingValue = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                          inputTensors[2]);
+        armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitepaddingValue);
+        if (paddingValueTensorInfo.GetNumElements() != 1)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Multiple padding values are not supported in operator #%d node #%d: ",
+                    tfLitePadOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+        // Get the padding value from the input tensor
+        switch (TfLiteOpaqueTensorType(tfLitepaddingValue))
+        {
+            case kTfLiteFloat32:
+                descriptor.m_PadValue = static_cast<float*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
+                break;
+            case kTfLiteUInt8:
+                descriptor.m_PadValue = static_cast<uint8_t*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
+                break;
+            case kTfLiteInt8:
+                descriptor.m_PadValue = static_cast<int8_t*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
+                break;
+            default:
+                TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                        tfLiteContext,
+                        "TfLiteArmnnOpaqueDelegate: Padding value datatype is not supported in operator #%d node #%d: ",
+                        tfLitePadOperatorCode, nodeIndex);
+                return kTfLiteError;
+        }
+    }
+    else if (tfLitePadOperatorCode == kTfLiteBuiltinMirrorPad)
+    {
+        auto* options = reinterpret_cast<TfLiteMirrorPaddingParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+        if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect)
+        {
+            descriptor.m_PaddingMode = armnn::PaddingMode::Reflect;
+        }
+        else if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric)
+        {
+            descriptor.m_PaddingMode = armnn::PaddingMode::Symmetric;
+        }
+        else
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: PaddingMode must be either REFLECT or SYMMETRIC "
+                    "in operator #%d node #%d: ",
+                    tfLitePadOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+
+        // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
+        // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
+        auto inputShape = inputTensorInfo.GetShape();
+        auto padList = descriptor.m_PadList;
+
+        const auto isReflect = static_cast<unsigned int>(descriptor.m_PaddingMode == armnn::PaddingMode::Reflect);
+        for(unsigned int i = 0; i < padList.size(); ++i)
+        {
+            if(padList.at(i).first > (inputShape[i] - isReflect) ||
+               padList.at(i).second > (inputShape[i] - isReflect))
+            {
+                TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                        tfLiteContext,
+                        "TfLiteArmnnOpaqueDelegate: Padding values must be less (Reflect) or "
+                        "equal (Symmetric) to the dimension size in operator #%d node #%d: ",
+                        tfLitePadOperatorCode, nodeIndex);
+                return kTfLiteError;
+            }
+        }
+    }
+
+    armnn::BackendId setBackend;
+    if (!delegateData.m_Network)
+    {
+        bool isSupported = false;
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("PAD",
+                                          tfLiteContext,
+                                          IsPadSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          descriptor);
+
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+    ARMNN_ASSERT(padLayer != nullptr);
+    padLayer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    return Connect(padLayer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/Unpack.hpp b/delegate/opaque/src/Unpack.hpp
index e16969768e..9b87bf7995 100644
--- a/delegate/opaque/src/Unpack.hpp
+++ b/delegate/opaque/src/Unpack.hpp
@@ -2,3 +2,230 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
+                                 TfLiteOpaqueContext* tfLiteContext,
+                                 TfLiteOpaqueNode* tfLiteNode,
+                                 int nodeIndex,
+                                 int32_t operatorCode)
+{
+    // Check inputs
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const int* inputTensors;
+    int numInputs;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                     inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteUnpackParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+
+    // Get Unpack Axis
+    const unsigned int unpackAxis = NonNegative(tfLiteNodeParameters->axis, nodeIndex);
+
+    if (unpackAxis >= inputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: The unpack axis #%d cannot be greater than or equal to "
+                "the number of input dimensions #%d in operator #%d node #%d",
+                unpackAxis, inputTensorInfo.GetNumDimensions(), operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Get Unpack Num
+    unsigned int unpackNum = NonNegative(tfLiteNodeParameters->num, nodeIndex);
+
+    // If num is not defined, automatically infer it from the length of the dimension axis.
+    if (unpackNum == 0)
+    {
+        unpackNum = inputTensorInfo.GetShape()[unpackAxis];
+    }
+
+    // If the unpack number cannot be inferred and is still zero, return kTfLiteError.
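+    // unpackNum can only still be zero here if the axis dimension itself has size zero.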
+    if (unpackNum == 0)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Number to unpack must be greater than zero in operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Check outputs
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, unpackNum, nodeIndex));
+
+    auto inputDimSize = inputTensorInfo.GetNumDimensions();
+    std::vector<unsigned int> unpackDimSizes(inputDimSize);
+
+    // Add current input shape to unpackDimSizes
+    for (unsigned int i = 0; i < inputDimSize; ++i)
+    {
+        unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
+    }
+
+    if (unpackDimSizes[unpackAxis] != unpackNum)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Number to unpack must be the same as length "
+                "of the dimension to unpack along in operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    unpackDimSizes[unpackAxis] /= unpackNum;
+
+    armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
+    for (unsigned int j = 0; j < unpackNum; ++j)
+    {
+        // Set the size of the views.
+        for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
+        {
+            splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
+        }
+        splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
+    }
+
+    // Gather output indices and use to get output tensors.
+    const int* outputTensors;
+    int numOutputs;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Validate all outputs and get TensorInfo
+    std::vector<armnn::TensorInfo> outputs;
+    for (unsigned int i = 0; i < unpackNum; ++i)
+    {
+        const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                          outputTensors[i]);
+        if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+        {
+            return kTfLiteError;
+        }
+
+        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
+    }
+
+    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
+
+    // Determine the shape of the Splitter layer outputs for validation
+    armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
+                                                          unpackDimSizes.data());
+
+    std::vector<armnn::TensorInfo> splitterOutputs;
+    for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
+    {
+        splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
+                                                    outputTensorInfos[outputIndex].get().GetDataType(),
+                                                    outputTensorInfos[outputIndex].get().GetQuantizationScale(),
+                                                    outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
+    }
+    std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
+                                                                                     splitterOutputs.end());
+
+    armnn::BackendId setBackendSplit;
+    if (!delegateData.m_Network)
+    {
+        // Check if splitter is supported
+        bool isSupported = false;
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("UNPACK",
+                                          tfLiteContext,
+                                          IsSplitterSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackendSplit,
+                                          inputTensorInfo,
+                                          splitterOutputTensorInfos,
+                                          splitDesc);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Create the Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer.
+    // Use this descriptor later when creating every ReshapeLayer, as all Reshape layers should be the same.
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
+
+    armnn::BackendId setBackendReshape;
+    if (!delegateData.m_Network)
+    {
+        bool isSupported = false;
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackendReshape,
+                                          splitterOutputTensorInfos[0],
+                                          outputTensorInfos[0],
+                                          reshapeDescriptor);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    std::string splitterLayerName("Unpack Splitter");
+
+    armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
+                                                                                       splitterLayerName.c_str());
+    ARMNN_ASSERT(splitterLayer != nullptr);
+    splitterLayer->SetBackendId(setBackendSplit);
+
+    for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
+    {
+        splitterLayer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
+    }
+
+    // Connect the input slots
+    auto inputIndex = static_cast<unsigned int>(inputTensors[0]);
+    delegateData.m_OutputSlotForNode[inputIndex]->Connect(splitterLayer->GetInputSlot(0));
+
+    // Create a Reshape layer for each Splitter output to remove the unpacked dimension.
+    for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
+    {
+        std::string reshapeLayerName("Unpack Reshape");
+        armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
+                                                                                         reshapeLayerName.c_str());
+        ARMNN_ASSERT(reshapeLayer != nullptr);
+        reshapeLayer->SetBackendId(setBackendReshape);
+
+        splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
+        splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));
+
+        armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
+        reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);
+
+        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(
+            static_cast<unsigned int>(outputTensors[outputIndex]))] = &slot;
+    }
+
+    return kTfLiteOk;
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 5cef9c42ff..b38b528317 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -918,6 +918,24 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                             nodeIndex,
                                             kTfLiteBuiltinNotEqual,
                                             armnn::ComparisonOperation::NotEqual);
+        case kTfLiteBuiltinPack:
+            return VisitPackOperator(delegateData,
+                                     tfLiteContext,
+                                     tfLiteNode,
+                                     nodeIndex,
+                                     kTfLiteBuiltinPack);
+        case kTfLiteBuiltinPad:
+            return VisitPadOperator(delegateData,
+                                    tfLiteContext,
+                                    tfLiteNode,
+                                    nodeIndex,
+                                    kTfLiteBuiltinPad);
+        case kTfLiteBuiltinPadv2:
+            return VisitPadOperator(delegateData,
+                                    tfLiteContext,
+                                    tfLiteNode,
+                                    nodeIndex,
+                                    kTfLiteBuiltinPadv2);
         case kTfLiteBuiltinPrelu:
             return VisitPreluOperator(delegateData,
                                       tfLiteContext,
@@ -993,6 +1011,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinTransposeConv);
+        case kTfLiteBuiltinUnpack:
+            return VisitUnpackOperator(delegateData,
+                                       tfLiteContext,
+                                       tfLiteNode,
+                                       nodeIndex,
+                                       kTfLiteBuiltinUnpack);
         default:
return kTfLiteError; } -- cgit v1.2.1
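
For reference: VisitPackOperator maps the TfLite Pack builtin onto ArmNN's
Stack layer, so a Pack of N rank-R tensors becomes one Stack producing a
single rank-(R+1) tensor, while Unpack is decomposed into one Splitter plus
a Reshape per output, as the Unpack.hpp hunk above shows. Below is a minimal
sketch of the equivalent Stack network, built directly against the public
ArmNN graph API for a two-input Pack along axis 0; the [3, 4] shapes and the
"pack-as-stack" layer name are illustrative assumptions, not values taken
from the patch.

    // Sketch: the ArmNN network equivalent of TfLite Pack(axis=0, N=2).
    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // Two inputs of shape [3, 4] packed along axis 0 -> output [2, 3, 4].
        const TensorInfo inputInfo({ 3, 4 }, DataType::Float32);
        const TensorInfo outputInfo({ 2, 3, 4 }, DataType::Float32);

        StackDescriptor desc;
        desc.m_Axis       = 0;                    // TfLitePackParams::axis
        desc.m_NumInputs  = 2;                    // number of packed tensors
        desc.m_InputShape = inputInfo.GetShape(); // common shape of every input

        IConnectableLayer* input0 = network->AddInputLayer(0);
        IConnectableLayer* input1 = network->AddInputLayer(1);
        IConnectableLayer* stack  = network->AddStackLayer(desc, "pack-as-stack");
        IConnectableLayer* output = network->AddOutputLayer(0);

        input0->GetOutputSlot(0).SetTensorInfo(inputInfo);
        input1->GetOutputSlot(0).SetTensorInfo(inputInfo);
        stack->GetOutputSlot(0).SetTensorInfo(outputInfo);

        input0->GetOutputSlot(0).Connect(stack->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(stack->GetInputSlot(1));
        stack->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return 0;
    }

The descriptor records a single m_InputShape because Stack requires all of
its inputs to share one shape; the visitor adopts the first input's shape as
the canonical one and leaves shape agreement to backend validation.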