author     Tianle Cheng <tianle.cheng@arm.com>    2023-07-25 16:41:00 +0100
committer  Tianle Cheng <tianle.cheng@arm.com>    2023-07-28 08:11:56 +0000
commit     92ce35cda7c5e97eff05d6f37dc86990386309bb
tree       bd868f94a9ff0a126b626f94f4129736af927cfa
parent     35bae832a88dc06555492e7fe1ccb36a47594c21
download   armnn-92ce35cda7c5e97eff05d6f37dc86990386309bb.tar.gz
IVGCVSW-7886 Add TILE to delegate and opaque delegate
* Adding support for Tile in classic and opaque delegates
* CMake files updated
* Tests added

Signed-off-by: Tianle Cheng <tianle.cheng@arm.com>
Change-Id: I9b52cea3480eb71961cbccb1a346805f73b5661a
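For reference, TILE repeats a tensor along each axis by the factor given in its second (multiples) input, so each output extent is the corresponding input extent times its multiple. A minimal sketch of that shape rule in C++ (illustrative only, not part of this change; tileOutputShape is a hypothetical helper):

// Sketch of the TILE shape rule: outputDims[i] = inputDims[i] * multiples[i].
// For example, an input of shape {2, 3} tiled by {2, 2} yields shape {4, 6}.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint32_t> tileOutputShape(const std::vector<uint32_t>& inputDims,
                                      const std::vector<uint32_t>& multiples)
{
    std::vector<uint32_t> outputDims(inputDims.size());
    for (size_t i = 0; i < inputDims.size(); ++i)
    {
        outputDims[i] = inputDims[i] * multiples[i];
    }
    return outputDims;
}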
 delegate/CMakeLists.txt                 |   2 +
 delegate/classic/CMakeLists.txt         |   1 +
 delegate/classic/src/Tile.hpp           | 169 +
 delegate/classic/src/armnn_delegate.cpp |   7 +
 delegate/opaque/CMakeLists.txt          |   1 +
 delegate/opaque/src/Tile.hpp            | 188 +
 delegate/opaque/src/armnn_delegate.cpp  |  15 +-
 delegate/test/TileTest.cpp              |  91 +
 delegate/test/TileTestHelper.hpp        | 149 +
 docs/05_03_delegate.dox                 |   2 +
 10 files changed, 621 insertions(+), 4 deletions(-)
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index e46ac04092..c1bf73a6ab 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -201,6 +201,8 @@ if(BUILD_UNIT_TESTS)
test/SplitTestHelper.hpp
test/TestUtils.hpp
test/TestUtils.cpp
+ test/TileTest.cpp
+ test/TileTestHelper.hpp
test/TransposeConvolution2dTest.cpp
test/TransposeTest.cpp
test/TransposeTestHelper.hpp
diff --git a/delegate/classic/CMakeLists.txt b/delegate/classic/CMakeLists.txt
index 8f872d6adc..7807153359 100644
--- a/delegate/classic/CMakeLists.txt
+++ b/delegate/classic/CMakeLists.txt
@@ -44,6 +44,7 @@ list(APPEND armnnClassicDelegateObject_sources
src/SpaceDepth.hpp
src/Split.hpp
src/Unpack.hpp
+ src/Tile.hpp
src/Transpose.hpp)
add_library(armnnClassicDelegateObject OBJECT ${armnnClassicDelegateObject_sources})
diff --git a/delegate/classic/src/Tile.hpp b/delegate/classic/src/Tile.hpp
new file mode 100644
index 0000000000..974c771a7e
--- /dev/null
+++ b/delegate/classic/src/Tile.hpp
@@ -0,0 +1,169 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+namespace armnnDelegate
+{
+TfLiteStatus ValidateTileOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::TileDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_SUPPORT_FUNC("TILE",
+ tfLiteContext,
+ IsTileSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitTileOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tileOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+ // The input contains the data that should be tiled
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The multiples tensor contains the number of copies for each axis
+ const TfLiteTensor& tfLiteMultiplesTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteMultiplesTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The output tensor
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& multiplesTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteMultiplesTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ // The multiples length must equal the number of dimensions in the input tensor
+ if (multiplesTensorInfo.GetNumElements() != inputTensorInfo.GetNumDimensions())
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The multiples length must equal the number of dimensions in the input tensor",
+ "Operator: #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Get the multiples data: in Arm NN, the values of the multiples input tensor are stored in the operator
+ // descriptor, so we have to read them from the input tensor and write them to the descriptor
+ auto* multiplesTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteMultiplesTensor);
+ auto multiplesTensorNum = tfLiteMultiplesTensor.dims->data[0];
+ std::vector<int32_t> multiplesIntData(multiplesTensorDataPtr, multiplesTensorDataPtr + multiplesTensorNum);
+
+ // The multiples must not be negative
+ for (auto multiple : multiplesIntData)
+ {
+ if (multiple < 0)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: The Multiples must be positive values",
+ "Operator: #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ // TfLite provides the multiples as int32, so we have to convert them to uint32 for the descriptor
+ std::vector<uint32_t> multiplesUintData;
+ std::transform(multiplesIntData.begin(),
+ multiplesIntData.end(),
+ std::back_inserter(multiplesUintData),
+ [] (const int value)
+ {
+ return static_cast<uint32_t>(value);
+ });
+
+ armnn::TileDescriptor tileDescriptor;
+ tileDescriptor.m_Multiples = multiplesUintData;
+
+ // Check output dimensions
+ if (inputTensorInfo.GetNumDimensions() != outputTensorInfo.GetNumDimensions())
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnDelegate: Input tensor dimension and output tensor dimension differ",
+ "Operator: #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // No network pointer indicates that only support for this operator should be checked
+ if (!delegateData.m_Network)
+ {
+ return ValidateTileOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ tileDescriptor);
+ }
+
+ std::string layerName("Tile");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
+
+ if (layer == nullptr)
+ {
+ return kTfLiteError;
+ }
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
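Note that the check above only compares dimension counts (ranks); a stricter validation would also confirm every output extent. A sketch of that extra check, assuming armnn::TensorShape's GetNumDimensions() and operator[] from the public Arm NN headers (TileShapesConsistent is a hypothetical helper, not part of this commit):

// Sketch: verify that each output extent equals the input extent times its multiple,
// e.g. input {2, 3} with multiples {2, 2} must produce output {4, 6}.
#include <armnn/Tensor.hpp>
#include <cstdint>
#include <vector>

bool TileShapesConsistent(const armnn::TensorShape& inputShape,
                          const std::vector<uint32_t>& multiples,
                          const armnn::TensorShape& outputShape)
{
    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); ++i)
    {
        if (outputShape[i] != inputShape[i] * multiples[i])
        {
            return false;
        }
    }
    return true;
}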
diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 0f9e8a624c..45bea3d442 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -39,6 +39,7 @@
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
+#include "Tile.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"
@@ -1064,6 +1065,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinTanh);
+ case kTfLiteBuiltinTile:
+ return VisitTileOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTile);
case kTfLiteBuiltinUnidirectionalSequenceLstm:
return VisitUnidirectionalSequenceLstmOperator(delegateData,
tfLiteContext,
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 787046d80c..c05bccf8c9 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -41,6 +41,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
src/SpaceDepth.hpp
src/Split.hpp
src/StridedSlice.hpp
+ src/Tile.hpp
src/Transpose.hpp
src/UnidirectionalSequenceLstm.hpp
src/Unpack.hpp)
diff --git a/delegate/opaque/src/Tile.hpp b/delegate/opaque/src/Tile.hpp
new file mode 100644
index 0000000000..17cbdee7eb
--- /dev/null
+++ b/delegate/opaque/src/Tile.hpp
@@ -0,0 +1,188 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <OpaqueDelegateUtils.hpp>
+
+namespace armnnOpaqueDelegate
+{
+TfLiteStatus ValidateTileOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext *tfLiteContext,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo,
+ const armnn::TileDescriptor& descriptor)
+{
+ bool isSupported = false;
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("TILE",
+ tfLiteContext,
+ IsTileSupported,
+ delegateData.m_Backends,
+ isSupported,
+ armnn::BackendId(),
+ inputInfo,
+ outputInfo,
+ descriptor);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+}
+
+TfLiteStatus VisitTileOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t tileOperatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ // Gather input tensors
+ auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Gather output tensors
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The input contains the data that should be tiled
+ const TfLiteOpaqueTensor* tfLiteInputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (IsDynamicTensor(tfLiteInputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The multiples tensor contains the number of copies for each axis
+ const TfLiteOpaqueTensor* tfLiteMultiplesTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+ if (IsDynamicTensor(tfLiteMultiplesTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // The output tensor
+ const TfLiteOpaqueTensor* tfLiteOutputTensor =
+ TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& multiplesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteMultiplesTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+ // The multiples length must equal the number of dimensions in the input tensor
+ if (multiplesTensorInfo.GetNumElements() != inputTensorInfo.GetNumDimensions())
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate:",
+ "The multiples length must equal the number of dimensions in the input tensor",
+ "Operator: #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Get the multiples data: in Arm NN, the values of the multiples input tensor are stored in the operator
+ // descriptor, so we have to read them from the input tensor and write them to the descriptor
+ auto* multiplesTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteMultiplesTensor));
+ auto multiplesTensorNum = TfLiteOpaqueTensorDim(tfLiteMultiplesTensor, 0);
+ std::vector<int32_t> multiplesIntData(multiplesTensorDataPtr, multiplesTensorDataPtr + multiplesTensorNum);
+
+ // The multiples must not be negative
+ for (auto multiple : multiplesIntData)
+ {
+ if (multiple < 0)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: The Multiples must be positive values",
+ "Operator: #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+
+ // TfLite provides the multiples as int32, so we have to convert them to uint32 for the descriptor
+ std::vector<uint32_t> multiplesUintData;
+ std::transform(multiplesIntData.begin(),
+ multiplesIntData.end(),
+ std::back_inserter(multiplesUintData),
+ [] (const int value)
+ {
+ return static_cast<uint32_t>(value);
+ });
+
+ armnn::TileDescriptor tileDescriptor;
+ tileDescriptor.m_Multiples = multiplesUintData;
+
+ // Check output dimensions
+ if (inputTensorInfo.GetNumDimensions() != outputTensorInfo.GetNumDimensions())
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Input tensor dimension and output tensor dimension differ",
+ "Operator: #%d node #%d: ",
+ tileOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // No network pointer indicates that only support for this operator should be checked
+ if (!delegateData.m_Network)
+ {
+ return ValidateTileOperator(delegateData,
+ tfLiteContext,
+ inputTensorInfo,
+ outputTensorInfo,
+ tileDescriptor);
+ }
+
+ std::string layerName("Tile");
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());
+
+ if (layer == nullptr)
+ {
+ return kTfLiteError;
+ }
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
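The opaque implementation mirrors the classic one; the main difference is that tensor data and dimensions are reached through the opaque C API rather than the TfLiteTensor struct. A condensed sketch of that read path (ReadMultiples is a hypothetical helper; TfLiteOpaqueTensorData and TfLiteOpaqueTensorDim are the accessors used in the hunk above, assumed to be declared in c_api_opaque.h):

// Sketch: read the int32 multiples through the opaque API and widen them
// to uint32 for armnn::TileDescriptor::m_Multiples.
#include <tensorflow/lite/c/c_api_opaque.h>
#include <cstdint>
#include <vector>

std::vector<uint32_t> ReadMultiples(const TfLiteOpaqueTensor* multiplesTensor)
{
    auto* data = static_cast<int32_t*>(TfLiteOpaqueTensorData(multiplesTensor));
    int32_t count = TfLiteOpaqueTensorDim(multiplesTensor, 0);
    std::vector<uint32_t> result;
    result.reserve(static_cast<size_t>(count));
    for (int32_t i = 0; i < count; ++i)
    {
        result.push_back(static_cast<uint32_t>(data[i]));
    }
    return result;
}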
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 510352eae9..49fa30d8f0 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -38,6 +38,7 @@
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
+#include "Tile.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"
@@ -1138,12 +1139,18 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinTanh);
+ case kTfLiteBuiltinTile:
+ return VisitTileOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTile);
case kTfLiteBuiltinTranspose:
return VisitTransposeOperator(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- kTfLiteBuiltinTranspose);
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinTranspose);
case kTfLiteBuiltinTransposeConv:
return VisitConvolutionOperator(delegateData,
tfLiteContext,
diff --git a/delegate/test/TileTest.cpp b/delegate/test/TileTest.cpp
new file mode 100644
index 0000000000..2e20859f5b
--- /dev/null
+++ b/delegate/test/TileTest.cpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TileTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+void TileFloat32Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<float> inputValues =
+ {
+ 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f
+ };
+
+ // Set output data
+ std::vector<float> expectedOutputValues =
+ {
+ 0.f, 1.f, 2.f, 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f, 3.f, 4.f, 5.f,
+
+ 0.f, 1.f, 2.f, 0.f, 1.f, 2.f,
+ 3.f, 4.f, 5.f, 3.f, 4.f, 5.f
+ };
+
+ // The multiples
+ const std::vector<int32_t> multiplesValues = { 2, 2 };
+
+ // Set shapes
+ const std::vector<int32_t> inputShape = { 2, 3 };
+ const std::vector<int32_t> multiplesShape = { 2 };
+ const std::vector<int32_t> expectedOutputShape = { 4, 6 };
+
+ TileFP32TestImpl(tflite::BuiltinOperator_TILE,
+ backends,
+ inputValues,
+ inputShape,
+ multiplesValues,
+ multiplesShape,
+ expectedOutputValues,
+ expectedOutputShape);
+}
+
+#if defined(TILE_GPUACC)
+TEST_SUITE("TileTests_GpuAccTests")
+{
+
+ TEST_CASE ("Tile_Float32_GpuAcc_Test")
+ {
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ TileFloat32Test(backends);
+ }
+
+} // TEST_SUITE("Tile_Float32_GpuAcc_Test")
+#endif
+
+TEST_SUITE("TileTests_CpuAccTests")
+{
+
+ TEST_CASE ("Tile_Float32_CpuAcc_Test")
+ {
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ TileFloat32Test(backends);
+ }
+
+} // TEST_SUITE("Tile_Float32_CpuAcc_Test")
+
+TEST_SUITE("TileTests_CpuRefTests")
+{
+
+ TEST_CASE ("Tile_Float32_CpuRef_Test")
+ {
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ TileFloat32Test(backends);
+ }
+
+} // TEST_SUITE("Tile_Float32_CpuRef_Test")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/TileTestHelper.hpp b/delegate/test/TileTestHelper.hpp
new file mode 100644
index 0000000000..f376612f36
--- /dev/null
+++ b/delegate/test/TileTestHelper.hpp
@@ -0,0 +1,149 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/version.h>
+
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateTileTfLiteModel(tflite::BuiltinOperator operatorCode,
+ tflite::TensorType inputTensorType,
+ const std::vector<int32_t>& inputTensorShape,
+ const std::vector<int32_t>& multiplesTensorData,
+ const std::vector<int32_t>& multiplesTensorShape,
+ const std::vector<int32_t>& outputTensorShape)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+ buffers.push_back(CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(
+ reinterpret_cast<const uint8_t*>(multiplesTensorData.data()),
+ sizeof(int32_t) * multiplesTensorData.size())));
+ buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+ std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+ tensors[0] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+ inputTensorShape.size()),
+ inputTensorType,
+ 1,
+ flatBufferBuilder.CreateString("input_tensor"));
+
+ tensors[1] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(multiplesTensorShape.data(),
+ multiplesTensorShape.size()),
+ TensorType_INT32,
+ 2,
+ flatBufferBuilder.CreateString("axis_input_tensor"));
+
+ tensors[2] = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+ outputTensorShape.size()),
+ inputTensorType,
+ 3,
+ flatBufferBuilder.CreateString("output_tensor"));
+
+ // Create Operator
+ tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+ flatbuffers::Offset<void> operatorBuiltinOption = 0;
+
+ const std::vector<int> operatorInputs {0, 1};
+ const std::vector<int> operatorOutputs {2};
+
+ flatbuffers::Offset<Operator> tileOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOption);
+
+ const std::vector<int> subgraphInputs{0, 1};
+ const std::vector<int> subgraphOutputs{2};
+ flatbuffers::Offset <SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&tileOperator, 1));
+
+ flatbuffers::Offset <flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: Tile Operator Model");
+ flatbuffers::Offset <OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, operatorCode);
+
+ flatbuffers::Offset <Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&opCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void TileFP32TestImpl(tflite::BuiltinOperator operatorCode,
+ std::vector<armnn::BackendId>& backends,
+ std::vector<float>& inputValues,
+ std::vector<int32_t> inputShape,
+ std::vector<int32_t> multiplesValues,
+ std::vector<int32_t> multiplesShapes,
+ std::vector<float>& expectedOutputValues,
+ std::vector<int32_t> expectedOutputShape)
+{
+ using namespace delegateTestInterpreter;
+
+ std::vector<char> modelBuffer = CreateTileTfLiteModel(operatorCode,
+ ::tflite::TensorType::TensorType_FLOAT32,
+ inputShape,
+ multiplesValues,
+ multiplesShapes,
+ expectedOutputShape);
+
+ // Setup interpreter with just TFLite Runtime.
+ auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+ CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(multiplesValues, 1) == kTfLiteOk);
+ CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);
+
+ // Setup interpreter with Arm NN Delegate applied.
+ auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+ CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+ CHECK(armnnInterpreter.FillInputTensor<int32_t>(multiplesValues, 1) == kTfLiteOk);
+ CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+ std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+ std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);
+
+ armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+ armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+ tfLiteInterpreter.Cleanup();
+ armnnInterpreter.Cleanup();
+}
+
+} // anonymous namespace
\ No newline at end of file
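As a usage note, the helper is shape-generic; a hypothetical additional 1-D case (not part of this commit) would be driven the same way as the 2-D test in TileTest.cpp above:

// Hypothetical extra case: tile a 1-D tensor of shape {2} by {3} -> shape {6}.
void TileFloat32Rank1Test(std::vector<armnn::BackendId>& backends)
{
    std::vector<float> inputValues = { 1.f, 2.f };
    std::vector<float> expectedOutputValues = { 1.f, 2.f, 1.f, 2.f, 1.f, 2.f };

    TileFP32TestImpl(tflite::BuiltinOperator_TILE,
                     backends,
                     inputValues,
                     { 2 },   // input shape
                     { 3 },   // multiples values
                     { 1 },   // multiples shape
                     expectedOutputValues,
                     { 6 });  // expected output shape
}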
diff --git a/docs/05_03_delegate.dox b/docs/05_03_delegate.dox
index 632afa0cf0..153d1eb4ca 100644
--- a/docs/05_03_delegate.dox
+++ b/docs/05_03_delegate.dox
@@ -201,6 +201,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operato
- TANH
+- TILE
+
- TRANSPOSE
- TRANSPOSE_CONV