author     Teresa Charlin <teresa.charlinreyes@arm.com>   2023-07-26 10:07:55 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>        2023-07-26 13:16:42 +0000
commit     777008b49aabdb6d1a57221b3ca875be5b182b46 (patch)
tree       f41b0cec2a81cced2a23f771426a2c9ac9d43c1b
parent     1bf56cde4f199ea8df722d1875ee44bd0ffb414a (diff)
download   armnn-777008b49aabdb6d1a57221b3ca875be5b182b46.tar.gz
IVGCVSW-7885 Add TILE to TFLite parser
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ic2c3a89b89f44f111e5a184c83db89ea1cb52976
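
With this change, a TensorFlow Lite model containing a TILE operator can be loaded through the public parser API instead of failing as an unsupported operator. A minimal usage sketch, assuming the usual ITfLiteParser entry points; the model file name and tensor names below are illustrative assumptions, not taken from this commit:

    // Illustrative sketch only: loading a TFLite model that contains a TILE
    // operator through the armnn TfLite parser.
    #include <armnnTfLiteParser/ITfLiteParser.hpp>
    #include <armnn/INetwork.hpp>

    int main()
    {
        using namespace armnnTfLiteParser;

        ITfLiteParserPtr parser = ITfLiteParser::Create();

        // With this change, BuiltinOperator_TILE is dispatched to ParseTile and
        // becomes a Tile layer in the resulting armnn network.
        armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model_with_tile.tflite");

        // Input/output binding info is looked up by tensor name, as in the new unit test.
        BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "inputTensor");
        BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "outputTensor");

        (void)network;
        (void)inputBinding;
        (void)outputBinding;
        return 0;
    }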
-rw-r--r--  CMakeLists.txt                            1
-rw-r--r--  docs/05_01_parsers.dox                    1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp   43
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp    1
-rw-r--r--  src/armnnTfLiteParser/test/Tile.cpp     114
5 files changed, 160 insertions(+), 0 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index bf598aa200..42da39b8a8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -724,6 +724,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/StridedSlice.cpp
src/armnnTfLiteParser/test/Sub.cpp
src/armnnTfLiteParser/test/Sum.cpp
+ src/armnnTfLiteParser/test/Tile.cpp
src/armnnTfLiteParser/test/TransposeConv.cpp
src/armnnTfLiteParser/test/Transpose.cpp
src/armnnTfLiteParser/test/Unpack.cpp
diff --git a/docs/05_01_parsers.dox b/docs/05_01_parsers.dox
index 0be5f30b0d..6050d65953 100644
--- a/docs/05_01_parsers.dox
+++ b/docs/05_01_parsers.dox
@@ -191,6 +191,7 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
- SUB
- SUM
- TANH
+- TILE
- TRANSPOSE
- TRANSPOSE_CONV
- UNIDIRECTIONAL_SEQUENCE_LSTM
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 77ce565959..a6d651f5ab 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -826,6 +826,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
+ m_ParserFunctions[tflite::BuiltinOperator_TILE] = &TfLiteParserImpl::ParseTile;
m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
@@ -3306,6 +3307,48 @@ void TfLiteParserImpl::ParseReverseV2(size_t subgraphIndex, size_t operatorIndex
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParserImpl::ParseTile(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 2);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+ TensorInfo multiplesTensorInfo = ToTensorInfo(inputs[1]);
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+
+ auto layerName = fmt::format("Tile:{}:{}", subgraphIndex, operatorIndex);
+
+ TileDescriptor descriptor;
+
+ BufferRawPtr multiplesBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+ if (multiplesBufferPtr != nullptr)
+ {
+ std::vector<int32_t> multiplesData(multiplesTensorInfo.GetNumElements());
+ ::memcpy(multiplesData.data(), multiplesBufferPtr->data.data(), multiplesTensorInfo.GetNumBytes());
+ descriptor.m_Multiples.assign(multiplesData.begin(), multiplesData.end());
+ }
+ else
+ {
+ ARMNN_THROW_PARSE_EXCEPTION("For Tile layer, Multiples data was not found in the buffer.");
+ }
+
+ IConnectableLayer* layer = m_Network->AddTileLayer(descriptor, layerName.c_str());
+ ARMNN_ASSERT(layer != nullptr);
+
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index eb452895a2..e7f265915a 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -194,6 +194,7 @@ private:
void ParseSub(size_t subgraphIndex, size_t operatorIndex);
void ParseSum(size_t subgraphIndex, size_t operatorIndex);
void ParseTanH(size_t subgraphIndex, size_t operatorIndex);
+ void ParseTile(size_t subgraphIndex, size_t operatorIndex);
void ParseTranspose(size_t subgraphIndex, size_t operatorIndex);
void ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex);
void ParseUnidirectionalSequenceLSTM(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Tile.cpp b/src/armnnTfLiteParser/test/Tile.cpp
new file mode 100644
index 0000000000..b0c663ac8b
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Tile.cpp
@@ -0,0 +1,114 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+
+TEST_SUITE("TensorflowLiteParser_Tile")
+{
+struct TileFixture : public ParserFlatbuffersFixture
+{
+ explicit TileFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& multiplesShape,
+ const std::string& multiplesData,
+ const std::string& dataType = "FLOAT32",
+ const std::string& scale = "1.0",
+ const std::string& offset = "0")
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [
+ {
+ "deprecated_builtin_code": 69,
+ "version": 1,
+ "builtin_code": "TILE"
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )" + inputShape + R"(,
+ "type": )" + dataType + R"(,
+ "buffer": 1,
+ "name": "inputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
+ },
+ "is_variable": false,
+ "has_rank": true
+ },
+ {
+ "shape": )" + multiplesShape + R"(,
+ "type": "INT32",
+ "buffer": 2,
+ "name": "multiples",
+ "quantization": {
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false,
+ "has_rank": true
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": )" + dataType + R"(,
+ "buffer": 3,
+ "name": "outputTensor",
+ "quantization": {
+ "min": [ 0.0 ],
+ "max": [ 255.0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
+ },
+ "is_variable": false,
+ "has_rank": true
+ }
+ ],
+ "inputs": [ 0 ],
+ "outputs": [ 2 ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [ 0, 1 ],
+ "outputs": [ 2 ],
+ "builtin_options_type": "NONE",
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ } ],
+ "buffers" : [
+ { },
+ { },
+ { "data": )" + multiplesData + R"(, },
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ }
+};
+
+struct SimpleTileFixture : public TileFixture
+{
+ SimpleTileFixture() : TileFixture("[ 2, 2 ]", "[ 4, 6 ]", "[ 2 ]", "[ 2, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+TEST_CASE_FIXTURE(SimpleTileFixture, "ParseTile")
+{
+ RunTest<2, armnn::DataType::Float32, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1, 2,
+ 3, 4 }}},
+ {{ "outputTensor", { 1, 2, 1, 2, 1, 2,
+ 3, 4, 3, 4, 3, 4,
+ 1, 2, 1, 2, 1, 2,
+ 3, 4, 3, 4, 3, 4, }}});
+}
+
+} \ No newline at end of file