From 92ce35cda7c5e97eff05d6f37dc86990386309bb Mon Sep 17 00:00:00 2001
From: Tianle Cheng
Date: Tue, 25 Jul 2023 16:41:00 +0100
Subject: IVGCVSW-7886 Add TILE to delegate and opaque delegate

* Adding support for Tile in classic and opaque delegates
* CMake files updated
* Tests added

Signed-off-by: Tianle Cheng
Change-Id: I9b52cea3480eb71961cbccb1a346805f73b5661a
---
 delegate/CMakeLists.txt                 |   2 +
 delegate/classic/CMakeLists.txt         |   1 +
 delegate/classic/src/Tile.hpp           | 169 ++++++++++++++++++++++++++++
 delegate/classic/src/armnn_delegate.cpp |   7 ++
 delegate/opaque/CMakeLists.txt          |   1 +
 delegate/opaque/src/Tile.hpp            | 188 ++++++++++++++++++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp  |  15 ++-
 delegate/test/TileTest.cpp              |  91 ++++++++++++++
 delegate/test/TileTestHelper.hpp        | 149 +++++++++++++++++++++++
 docs/05_03_delegate.dox                 |   2 +
 10 files changed, 621 insertions(+), 4 deletions(-)
 create mode 100644 delegate/classic/src/Tile.hpp
 create mode 100644 delegate/opaque/src/Tile.hpp
 create mode 100644 delegate/test/TileTest.cpp
 create mode 100644 delegate/test/TileTestHelper.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index e46ac04092..c1bf73a6ab 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -201,6 +201,8 @@ if(BUILD_UNIT_TESTS)
         test/SplitTestHelper.hpp
         test/TestUtils.hpp
         test/TestUtils.cpp
+        test/TileTest.cpp
+        test/TileTestHelper.hpp
         test/TransposeConvolution2dTest.cpp
         test/TransposeTest.cpp
         test/TransposeTestHelper.hpp
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index 8f872d6adc..7807153359 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -44,6 +44,7 @@ list(APPEND armnnClassicDelegateObject_sources
         src/SpaceDepth.hpp
         src/Split.hpp
         src/Unpack.hpp
+        src/Tile.hpp
         src/Transpose.hpp)
 
 add_library(armnnClassicDelegateObject OBJECT ${armnnClassicDelegateObject_sources})
diff --git a/delegate/classic/src/Tile.hpp b/delegate/classic/src/Tile.hpp
new file mode 100644
index 0000000000..974c771a7e
--- /dev/null
+++ b/delegate/classic/src/Tile.hpp
@@ -0,0 +1,169 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ClassicDelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+namespace armnnDelegate
+{
+TfLiteStatus ValidateTileOperator(DelegateData& delegateData,
+                                  TfLiteContext* tfLiteContext,
+                                  const armnn::TensorInfo& inputInfo,
+                                  const armnn::TensorInfo& outputInfo,
+                                  const armnn::TileDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC("TILE",
+                               tfLiteContext,
+                               IsTileSupported,
+                               delegateData.m_Backends,
+                               isSupported,
+                               armnn::BackendId(),
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitTileOperator(DelegateData& delegateData,
+                               TfLiteContext* tfLiteContext,
+                               TfLiteNode* tfLiteNode,
+                               int nodeIndex,
+                               int32_t tileOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+    // The input contains the data that should be tiled
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The multiples tensor contains the number of copies for each axis
+    const TfLiteTensor& tfLiteMultiplesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (IsDynamicTensor(tfLiteMultiplesTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& multiplesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteMultiplesTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    // The multiples length must match the number of dimensions in the input tensor
+    if (multiplesTensorInfo.GetNumElements() != inputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: The multiples length must match the number of dimensions in the input tensor, "
+            "Operator: #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Get the multiples data: in Arm NN, the values of the multiples input tensor are stored in the operator
+    // descriptor, so we have to read them from the input tensor and write them to the descriptor
+    auto* multiplesTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteMultiplesTensor);
+    auto multiplesTensorNum = tfLiteMultiplesTensor.dims->data[0];
+    std::vector<int32_t> multiplesIntData(multiplesTensorDataPtr, multiplesTensorDataPtr + multiplesTensorNum);
+
+    // The multiples must not be negative
+    for (auto multiple : multiplesIntData)
+    {
+        if (multiple < 0)
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: The multiples must not be negative values, "
+                "Operator: #%d node #%d: ",
+                tileOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+    }
+
+    // The original input from TFLite is int32, and we have to convert it to uint32 for our descriptor
+    std::vector<uint32_t> multiplesUintData;
+    std::transform(multiplesIntData.begin(),
+                   multiplesIntData.end(),
+                   std::back_inserter(multiplesUintData),
+                   [] (const int value)
+                   {
+                       return static_cast<uint32_t>(value);
+                   });
+
+    armnn::TileDescriptor tileDescriptor;
+    tileDescriptor.m_Multiples = multiplesUintData;
+
+    // Check output dimensions
+    if (inputTensorInfo.GetNumDimensions() != outputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Input tensor dimension and output tensor dimension differ, "
+            "Operator: #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateTileOperator(delegateData,
+                                    tfLiteContext,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    tileDescriptor);
+    }
+
+    std::string layerName("Tile");
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
+
+    if (layer == nullptr)
+    {
+        return kTfLiteError;
+    }
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
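For reference, the semantics being delegated here: Tile repeats the input along each axis the number of times given by the corresponding entry in m_Multiples, so each output element maps back to the input element at the same index modulo the input dimension. A minimal standalone sketch of that behaviour for the 2-D case (illustrative only; the helper name TileReference2d is hypothetical and not part of the patch):

#include <cstddef>
#include <vector>

// Reference 2-D tile: every output element (y, x) reads input (y % rows, x % cols).
std::vector<float> TileReference2d(const std::vector<float>& input,
                                   size_t rows, size_t cols,
                                   size_t multipleY, size_t multipleX)
{
    const size_t outRows = rows * multipleY;
    const size_t outCols = cols * multipleX;
    std::vector<float> output(outRows * outCols);
    for (size_t y = 0; y < outRows; ++y)
    {
        for (size_t x = 0; x < outCols; ++x)
        {
            output[y * outCols + x] = input[(y % rows) * cols + (x % cols)];
        }
    }
    return output;
}

With the input shape { 2, 3 } and multiples { 2, 2 } used by the tests below, this produces exactly the { 4, 6 } output the tests expect.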
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 0f9e8a624c..45bea3d442 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -39,6 +39,7 @@
 #include "Softmax.hpp"
 #include "SpaceDepth.hpp"
 #include "Split.hpp"
+#include "Tile.hpp"
 #include "Transpose.hpp"
 #include "UnidirectionalSequenceLstm.hpp"
 #include "Unpack.hpp"
@@ -1064,6 +1065,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinTanh);
+        case kTfLiteBuiltinTile:
+            return VisitTileOperator(delegateData,
+                                     tfLiteContext,
+                                     tfLiteNode,
+                                     nodeIndex,
+                                     kTfLiteBuiltinTile);
         case kTfLiteBuiltinUnidirectionalSequenceLstm:
             return VisitUnidirectionalSequenceLstmOperator(delegateData,
                                                            tfLiteContext,
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 787046d80c..c05bccf8c9 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -41,6 +41,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
         src/SpaceDepth.hpp
         src/Split.hpp
         src/StridedSlice.hpp
+        src/Tile.hpp
         src/Transpose.hpp
         src/UnidirectionalSequenceLstm.hpp
         src/Unpack.hpp)
diff --git a/delegate/opaque/src/Tile.hpp b/delegate/opaque/src/Tile.hpp
new file mode 100644
index 0000000000..17cbdee7eb
--- /dev/null
+++ b/delegate/opaque/src/Tile.hpp
@@ -0,0 +1,188 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+TfLiteStatus ValidateTileOperator(DelegateData& delegateData,
+                                  TfLiteOpaqueContext* tfLiteContext,
+                                  const armnn::TensorInfo& inputInfo,
+                                  const armnn::TensorInfo& outputInfo,
+                                  const armnn::TileDescriptor& descriptor)
+{
+    bool isSupported = false;
+    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("TILE",
+                                      tfLiteContext,
+                                      IsTileSupported,
+                                      delegateData.m_Backends,
+                                      isSupported,
+                                      armnn::BackendId(),
+                                      inputInfo,
+                                      outputInfo,
+                                      descriptor);
+    return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitTileOperator(DelegateData& delegateData,
+                               TfLiteOpaqueContext* tfLiteContext,
+                               TfLiteOpaqueNode* tfLiteNode,
+                               int nodeIndex,
+                               int32_t tileOperatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input tensors
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Gather output tensors
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+            nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The input contains the data that should be tiled
+    const TfLiteOpaqueTensor* tfLiteInputTensor =
+        TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The multiples tensor contains the number of copies for each axis
+    const TfLiteOpaqueTensor* tfLiteMultiplesTensor =
+        TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (IsDynamicTensor(tfLiteMultiplesTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // The output tensor
+    const TfLiteOpaqueTensor* tfLiteOutputTensor =
+        TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& multiplesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteMultiplesTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // The multiples length must match the number of dimensions in the input tensor
+    if (multiplesTensorInfo.GetNumElements() != inputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: The multiples length must match the number of dimensions "
+            "in the input tensor, Operator: #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Get the multiples data: in Arm NN, the values of the multiples input tensor are stored in the operator
+    // descriptor, so we have to read them from the input tensor and write them to the descriptor
+    auto* multiplesTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteMultiplesTensor));
+    auto multiplesTensorNum = TfLiteOpaqueTensorDim(tfLiteMultiplesTensor, 0);
+    std::vector<int32_t>
multiplesIntData(multiplesTensorDataPtr, multiplesTensorDataPtr + multiplesTensorNum);
+
+    // The multiples must not be negative
+    for (auto multiple : multiplesIntData)
+    {
+        if (multiple < 0)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: The multiples must not be negative values, "
+                "Operator: #%d node #%d: ",
+                tileOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+    }
+
+    // The original input from TFLite is int32, and we have to convert it to uint32 for our descriptor
+    std::vector<uint32_t> multiplesUintData;
+    std::transform(multiplesIntData.begin(),
+                   multiplesIntData.end(),
+                   std::back_inserter(multiplesUintData),
+                   [] (const int value)
+                   {
+                       return static_cast<uint32_t>(value);
+                   });
+
+    armnn::TileDescriptor tileDescriptor;
+    tileDescriptor.m_Multiples = multiplesUintData;
+
+    // Check output dimensions
+    if (inputTensorInfo.GetNumDimensions() != outputTensorInfo.GetNumDimensions())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnOpaqueDelegate: Input tensor dimension and output tensor dimension differ, "
+            "Operator: #%d node #%d: ",
+            tileOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // No network pointer indicates that only support for this operator should be checked
+    if (!delegateData.m_Network)
+    {
+        return ValidateTileOperator(delegateData,
+                                    tfLiteContext,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    tileDescriptor);
+    }
+
+    std::string layerName("Tile");
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
+
+    if (layer == nullptr)
+    {
+        return kTfLiteError;
+    }
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 510352eae9..49fa30d8f0 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -38,6 +38,7 @@
 #include "Softmax.hpp"
 #include "SpaceDepth.hpp"
 #include "Split.hpp"
+#include "Tile.hpp"
 #include "Transpose.hpp"
 #include "UnidirectionalSequenceLstm.hpp"
 #include "Unpack.hpp"
@@ -1138,12 +1139,18 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinTanh);
+        case kTfLiteBuiltinTile:
+            return VisitTileOperator(delegateData,
+                                     tfLiteContext,
+                                     tfLiteNode,
+                                     nodeIndex,
+                                     kTfLiteBuiltinTile);
         case kTfLiteBuiltinTranspose:
             return VisitTransposeOperator(delegateData,
-                                      tfLiteContext,
-                                      tfLiteNode,
-                                      nodeIndex,
-                                      kTfLiteBuiltinTranspose);
+                                          tfLiteContext,
+                                          tfLiteNode,
+                                          nodeIndex,
+                                          kTfLiteBuiltinTranspose);
         case kTfLiteBuiltinTransposeConv:
             return VisitConvolutionOperator(delegateData,
                                             tfLiteContext,
diff --git a/delegate/test/TileTest.cpp b/delegate/test/TileTest.cpp
new file mode 100644
index 0000000000..2e20859f5b
--- /dev/null
+++ b/delegate/test/TileTest.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TileTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+void TileFloat32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<float> inputValues =
+    {
+        0.f, 1.f, 2.f,
+        3.f, 4.f, 5.f
+    };
+
+    // Set output data
+    std::vector<float> expectedOutputValues =
+    {
+        0.f, 1.f, 2.f, 0.f, 1.f, 2.f,
+        3.f, 4.f, 5.f, 3.f, 4.f, 5.f,
+
+        0.f, 1.f, 2.f, 0.f, 1.f, 2.f,
+        3.f, 4.f, 5.f, 3.f, 4.f, 5.f
+    };
+
+    // The multiples
+    const std::vector<int32_t> multiplesValues = { 2, 2 };
+
+    // Set shapes
+    const std::vector<int32_t> inputShape = { 2, 3 };
+    const std::vector<int32_t> multiplesShape = { 2 };
+    const std::vector<int32_t> expectedOutputShape = { 4, 6 };
+
+    TileFP32TestImpl(tflite::BuiltinOperator_TILE,
+                     backends,
+                     inputValues,
+                     inputShape,
+                     multiplesValues,
+                     multiplesShape,
+                     expectedOutputValues,
+                     expectedOutputShape);
+}
+
+#if defined(TILE_GPUACC)
+TEST_SUITE("TileTests_GpuAccTests")
+{
+
+    TEST_CASE ("Tile_Float32_GpuAcc_Test")
+    {
+        std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+        TileFloat32Test(backends);
+    }
+
+} // TEST_SUITE("TileTests_GpuAccTests")
+#endif
+
+TEST_SUITE("TileTests_CpuAccTests")
+{
+
+    TEST_CASE ("Tile_Float32_CpuAcc_Test")
+    {
+        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+        TileFloat32Test(backends);
+    }
+
+} // TEST_SUITE("TileTests_CpuAccTests")
+
+TEST_SUITE("TileTests_CpuRefTests")
+{
+
+    TEST_CASE ("Tile_Float32_CpuRef_Test")
+    {
+        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+        TileFloat32Test(backends);
+    }
+
+} // TEST_SUITE("TileTests_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/TileTestHelper.hpp b/delegate/test/TileTestHelper.hpp
new file mode 100644
index 0000000000..f376612f36
--- /dev/null
+++ b/delegate/test/TileTestHelper.hpp
@@ -0,0 +1,149 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/version.h>
+
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateTileTfLiteModel(tflite::BuiltinOperator operatorCode,
+                                        tflite::TensorType inputTensorType,
+                                        const std::vector<int32_t>& inputTensorShape,
+                                        const std::vector<int32_t>& multiplesTensorData,
+                                        const std::vector<int32_t>& multiplesTensorShape,
+                                        const std::vector<int32_t>& outputTensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(
+                                       reinterpret_cast<const uint8_t*>(multiplesTensorData.data()),
+                                       sizeof(int32_t) * multiplesTensorData.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              inputTensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_tensor"));
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(multiplesTensorShape.data(),
+                                                                      multiplesTensorShape.size()),
+                              TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("multiples_input_tensor"));
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              inputTensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output_tensor"));
+
+    // Create Operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOption = 0;
+
+    const std::vector<int32_t> operatorInputs {0, 1};
+    const std::vector<int32_t> operatorOutputs {2};
+
+    flatbuffers::Offset<Operator> tileOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOption);
+
+    const std::vector<int32_t> subgraphInputs{0, 1};
+    const std::vector<int32_t> subgraphOutputs{2};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&tileOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Tile Operator Model");
+    flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, operatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&opCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void TileFP32TestImpl(tflite::BuiltinOperator operatorCode,
+                      std::vector<armnn::BackendId>& backends,
+                      std::vector<float>& inputValues,
+                      std::vector<int32_t> inputShape,
+                      std::vector<int32_t> multiplesValues,
+                      std::vector<int32_t> multiplesShape,
+                      std::vector<float>& expectedOutputValues,
+                      std::vector<int32_t> expectedOutputShape)
+{
+    using namespace
delegateTestInterpreter;
+
+    std::vector<char> modelBuffer = CreateTileTfLiteModel(operatorCode,
+                                                          ::tflite::TensorType::TensorType_FLOAT32,
+                                                          inputShape,
+                                                          multiplesValues,
+                                                          multiplesShape,
+                                                          expectedOutputShape);
+
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(multiplesValues, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(multiplesValues, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+    armnnDelegate::CompareOutputData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/docs/05_03_delegate.dox b/docs/05_03_delegate.dox
index 632afa0cf0..153d1eb4ca 100644
--- a/docs/05_03_delegate.dox
+++ b/docs/05_03_delegate.dox
@@ -201,6 +201,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato
 
 - TANH
 
+- TILE
+
 - TRANSPOSE
 
 - TRANSPOSE_CONV
--
cgit v1.2.1
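For completeness, a sketch of how the new layer can be driven through the Arm NN graph API directly, rather than via the delegate. This is a minimal illustration, assuming a build of Arm NN that includes this patch (which is what introduces TileDescriptor and INetwork::AddTileLayer); the layer names and the main() wrapper are illustrative only:

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    using namespace armnn;

    // Build a one-layer graph: input {2, 3} -> Tile (multiples {2, 2}) -> output {4, 6},
    // mirroring the shapes exercised by TileFloat32Test above.
    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input = network->AddInputLayer(0, "input");

    TileDescriptor tileDescriptor;
    tileDescriptor.m_Multiples = { 2, 2 };  // repeat each axis twice
    IConnectableLayer* tile = network->AddTileLayer(tileDescriptor, "tile");

    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Wire the layers together and annotate the slots with tensor metadata.
    input->GetOutputSlot(0).Connect(tile->GetInputSlot(0));
    tile->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3 }, DataType::Float32));
    tile->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 4, 6 }, DataType::Float32));

    return 0;
}

The delegate code above performs essentially these steps on behalf of the TFLite runtime: it fills m_Multiples from the second input tensor, adds the layer with AddTileLayer, sets the output slot's TensorInfo, and wires the slots via ProcessInputs and Connect.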