author    Teresa Charlin <teresa.charlinreyes@arm.com>   2022-02-25 13:21:55 +0000
committer Teresa Charlin <teresa.charlinreyes@arm.com>   2022-03-09 10:47:31 +0000
commit    cdbd40bcfe3f3b35d5ba2133e365cf0157a867c1 (patch)
tree      1e6c047adcbcbc0cf86983f1ba5f3ff0ac633a3e
parent    89655004ba20d36ec4882ed9c10f5d91aa244af2 (diff)
download  armnn-cdbd40bcfe3f3b35d5ba2133e365cf0157a867c1.tar.gz
IVGCVSW-6454 Add FLOOR_DIV Support to the TfLiteParser
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I7ab32f2998c2bc25fee8831d4be724286263b4b3
-rw-r--r--  CMakeLists.txt                             1
-rw-r--r--  docs/05_01_parsers.dox                     1
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp    41
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.hpp     4
-rw-r--r--  src/armnnTfLiteParser/test/FloorDiv.cpp  185
5 files changed, 232 insertions, 0 deletions
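
TFLite's FLOOR_DIV computes floor(x / y) element-wise. Rather than introducing a new element-wise operation, this patch maps the operator onto Arm NN's existing Division layer followed by a Floor layer (see ParseFloorDiv and AddFusedFloorLayer below). The snippet that follows is not part of the patch; it is a minimal standard-C++ sketch of the reference semantics, included only to explain the expected values in the new FloorDiv.cpp tests, including the 0/0 -> NaN and 1/0 -> +Inf entries.

// Reference semantics of FLOOR_DIV on float tensors: element-wise floor(x / y).
// Illustrative only; FloorDivReference is not a function in the Arm NN code base.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> FloorDivReference(const std::vector<float>& x, const std::vector<float>& y)
{
    std::vector<float> out(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
    {
        // 0/0 produces NaN and 1/0 produces +Inf; std::floor passes both through,
        // which matches the NaN and Inf entries expected by the unit tests.
        out[i] = std::floor(x[i] / y[i]);
    }
    return out;
}

// Example: FloorDivReference({-7.0f}, {2.0f}) yields {-4.0f}, whereas a plain
// truncating division would give -3; this is why the tests expect -4 for -7/2.
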
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c74d71745f..c069b052cc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -689,6 +689,7 @@ if(BUILD_UNIT_TESTS)
src/armnnTfLiteParser/test/Div.cpp
src/armnnTfLiteParser/test/ElementWiseUnary.cpp
src/armnnTfLiteParser/test/ExpandDims.cpp
+ src/armnnTfLiteParser/test/FloorDiv.cpp
src/armnnTfLiteParser/test/FullyConnected.cpp
src/armnnTfLiteParser/test/Gather.cpp
src/armnnTfLiteParser/test/L2Normalization.cpp
diff --git a/docs/05_01_parsers.dox b/docs/05_01_parsers.dox
index a4526e04d5..7284810d42 100644
--- a/docs/05_01_parsers.dox
+++ b/docs/05_01_parsers.dox
@@ -134,6 +134,7 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
- EQUAL
- EXP
- EXPAND_DIMS
+- FLOOR_DIV
- FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
- GATHER
- GREATER
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 81cbb9c8c0..5c6d619845 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -689,6 +689,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOpt
m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
+ m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
@@ -2111,6 +2112,34 @@ void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
}
+void TfLiteParserImpl::ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex)
+{
+ CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+ auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(inputs.size(), 2);
+
+ auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+ armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
+
+ auto layerName = fmt::format("Div:{}:{}", subgraphIndex, operatorIndex);
+ IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
+ ARMNN_ASSERT(layer != nullptr);
+
+ TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
+ layer = AddFusedFloorLayer(layer, 0);
+
+ auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+ RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -3943,6 +3972,18 @@ armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConn
return activationLayer;
}
+armnn::IConnectableLayer* TfLiteParserImpl::AddFusedFloorLayer(armnn::IConnectableLayer* prevLayer,
+ unsigned int outputSlot)
+{
+ std::string layerName = prevLayer->GetName();
+ IConnectableLayer* floorLayer = m_Network->AddFloorLayer(layerName.c_str());
+
+ auto & prevOutputSlot = prevLayer->GetOutputSlot(outputSlot);
+ prevOutputSlot.Connect(floorLayer->GetInputSlot(0));
+ floorLayer->GetOutputSlot(0).SetTensorInfo(prevOutputSlot.GetTensorInfo());
+ return floorLayer;
+}
+
TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char* fileName)
{
if (fileName == nullptr)
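
The new ParseFloorDiv builds a Division layer, registers the operator's two inputs against it, then calls AddFusedFloorLayer to attach a Floor layer to the division's output slot and registers the operator's output against that Floor layer. Below is a minimal sketch of the same Division-then-Floor graph built directly with the public INetwork API; the layer names, binding ids and the 1x3x4 shape are illustrative (the shape is taken from the unit tests), and the sketch is not code from the patch.

#include <armnn/ArmNN.hpp>

// Hand-built equivalent of the graph ParseFloorDiv produces for one FLOOR_DIV
// operator: inputTensor1, inputTensor2 -> Division -> Floor -> outputTensor.
armnn::INetworkPtr BuildFloorDivGraph()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();
    TensorInfo info(TensorShape({1, 3, 4}), DataType::Float32);

    IConnectableLayer* input0     = network->AddInputLayer(0, "inputTensor1");
    IConnectableLayer* input1     = network->AddInputLayer(1, "inputTensor2");
    IConnectableLayer* divLayer   = network->AddDivisionLayer("Div:0:0");
    // AddFusedFloorLayer reuses the previous layer's name, so the Floor layer
    // ends up with the same "Div:..." name in the parsed network.
    IConnectableLayer* floorLayer = network->AddFloorLayer("Div:0:0");
    IConnectableLayer* output     = network->AddOutputLayer(0, "outputTensor");

    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    divLayer->GetOutputSlot(0).SetTensorInfo(info);
    floorLayer->GetOutputSlot(0).SetTensorInfo(info);

    input0->GetOutputSlot(0).Connect(divLayer->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(divLayer->GetInputSlot(1));
    divLayer->GetOutputSlot(0).Connect(floorLayer->GetInputSlot(0));
    floorLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}
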
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 512b87fd6c..474393cbe6 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -133,6 +133,7 @@ private:
void ParseEqual(size_t subgraphIndex, size_t operatorIndex);
void ParseExp(size_t subgraphIndex, size_t operatorIndex);
void ParseExpandDims(size_t subgraphIndex, size_t operatorIndex);
+ void ParseFloorDiv(size_t subgraphIndex, size_t operatorIndex);
void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
void ParseGather(size_t subgraphIndex, size_t operatorIndex);
void ParseGreater(size_t subgraphIndex, size_t operatorIndex);
@@ -211,6 +212,9 @@ private:
unsigned int outputSlot,
tflite::ActivationFunctionType activationType);
+ /// Attach a floor layer to the one passed as a parameter
+ armnn::IConnectableLayer* AddFusedFloorLayer(armnn::IConnectableLayer* layer, unsigned int outputSlot);
+
// SupportedDataStorage's purpose is to hold data till we pass over to the network.
// We don't care about the content, and we want a single datatype to simplify the code.
struct SupportedDataStorage
diff --git a/src/armnnTfLiteParser/test/FloorDiv.cpp b/src/armnnTfLiteParser/test/FloorDiv.cpp
new file mode 100644
index 0000000000..dfd7b14bf4
--- /dev/null
+++ b/src/armnnTfLiteParser/test/FloorDiv.cpp
@@ -0,0 +1,185 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+
+
+TEST_SUITE("TensorflowLiteParser_Div")
+{
+struct FloorDivFixture : public ParserFlatbuffersFixture
+{
+ explicit FloorDivFixture(const std::string& inputShape1,
+ const std::string& inputShape2,
+ const std::string& outputShape,
+ const std::string& inputShapeSignature1,
+ const std::string& inputShapeSignature2,
+ const std::string& outputShapeSignature)
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [
+ {
+ "deprecated_builtin_code": 90,
+ "version": 2,
+ "builtin_code": "FLOOR_DIV"
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": )" + inputShape1 + R"(,
+ "type": "FLOAT32",
+ "buffer": 1,
+ "name": "inputTensor1",
+ "quantization": {
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false,
+ "shape_signature": )" + inputShapeSignature1 + R"(,
+ },
+ {
+ "shape": )" + inputShape2 + R"(,
+ "type": "FLOAT32",
+ "buffer": 2,
+ "name": "inputTensor2",
+ "quantization": {
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false,
+ "shape_signature": )" + inputShapeSignature2 + R"(,
+ },
+ {
+ "shape": )" + outputShape + R"(,
+ "type": "FLOAT32",
+ "buffer": 3,
+ "name": "outputTensor",
+ "quantization": {
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false,
+ "shape_signature": )" + outputShapeSignature + R"(,
+ }
+ ],
+ "inputs": [
+ 0,
+ 1
+ ],
+ "outputs": [
+ 2
+ ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [
+ 0,
+ 1
+ ],
+ "outputs": [
+ 2
+ ],
+ "builtin_options_type": "NONE",
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ "name": "main"
+ }
+ ],
+ "description": "MLIR Converted.",
+ "buffers": [ {}, {}, {}, {},
+ {
+ "data": [
+ 49,
+ 46,
+ 49,
+ 52,
+ 46,
+ 48,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ]
+ }
+ ],
+ "metadata": [
+ {
+ "name": "min_runtime_version",
+ "buffer": 4
+ }
+ ],
+ "signature_defs": [
+
+ ]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct SimpleFloorDivFixture : public FloorDivFixture
+{
+ SimpleFloorDivFixture() : FloorDivFixture("[ 1, 3, 4 ]", "[ 1, 3, 4 ]", "[ 1, 3, 4 ]",
+ "[ -1, 3, 4 ]", "[ -1, 3, 4 ]", "[ -1, 3, 4 ]") {}
+};
+
+TEST_CASE_FIXTURE(SimpleFloorDivFixture, "ParseFloorDiv")
+{
+ using armnn::DataType;
+ float Inf = std::numeric_limits<float>::infinity();
+ float NaN = std::numeric_limits<float>::quiet_NaN();
+
+ RunTest<3, DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, -7.0f, 8.0f,
+ 9.0f, 10.0f, -11.0f } },
+ { "inputTensor2", { 0.0f, 0.0f, 4.0f,
+ 3.0f, 40.0f, 5.0f,
+ 6.0f, 2.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f} } },
+ {{ "outputTensor", { NaN, Inf, 0.0f,
+ 1.0f, 0.0f, 1.0f,
+ 1.0f, -4.0f, 1.0f,
+ 1.0f, 1.0f, -1.0f } } });
+}
+
+
+struct DynamicFloorDivFixture : public FloorDivFixture
+{
+ DynamicFloorDivFixture() : FloorDivFixture("[ 1, 3, 4 ]", "[ 1, 3, 4 ]", "[ 1, 3, 4 ]",
+ "[ -1, 3, 4 ]", "[ -1, 3, 4 ]", "[ -1, 3, 4 ]") {}
+};
+
+TEST_CASE_FIXTURE(DynamicFloorDivFixture, "ParseDynamicFloorDiv")
+{
+ using armnn::DataType;
+ float Inf = std::numeric_limits<float>::infinity();
+ float NaN = std::numeric_limits<float>::quiet_NaN();
+
+ RunTest<3, DataType::Float32, DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, -7.0f, 8.0f,
+ 9.0f, 10.0f, -11.0f } },
+ { "inputTensor2", { 0.0f, 0.0f, 4.0f,
+ 3.0f, 40.0f, 5.0f,
+ 6.0f, 2.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f} } },
+ {{ "outputTensor", { NaN, Inf, 0.0f,
+ 1.0f, 0.0f, 1.0f,
+ 1.0f, -4.0f, 1.0f,
+ 1.0f, 1.0f, -1.0f } } }, true);
+}
+
+}
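
For completeness, a sketch of how a model containing FLOOR_DIV could be run end to end once this parser support is in place. The file name floor_div.tflite, the CpuRef backend choice and the input values are assumptions for illustration; the parser, Optimize and IRuntime calls are the standard Arm NN APIs, not code from this patch.

#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

#include <vector>

int main()
{
    using namespace armnn;

    // Parse a TFLite model that contains a FLOOR_DIV operator
    // ("floor_div.tflite" is a hypothetical path).
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    INetworkPtr network = parser->CreateNetworkFromBinaryFile("floor_div.tflite");

    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);

    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    NetworkId netId = 0;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Tensor names match the ones used in the FloorDiv.cpp fixture above.
    auto in0 = parser->GetNetworkInputBindingInfo(0, "inputTensor1");
    auto in1 = parser->GetNetworkInputBindingInfo(0, "inputTensor2");
    auto out = parser->GetNetworkOutputBindingInfo(0, "outputTensor");

    // Recent Arm NN releases require input TensorInfos to be flagged constant
    // before wrapping them in ConstTensor.
    in0.second.SetConstant(true);
    in1.second.SetConstant(true);

    std::vector<float> x(12, -7.0f);
    std::vector<float> y(12, 2.0f);
    std::vector<float> result(12);

    InputTensors  inputs  { { in0.first, ConstTensor(in0.second, x.data()) },
                            { in1.first, ConstTensor(in1.second, y.data()) } };
    OutputTensors outputs { { out.first, Tensor(out.second, result.data()) } };

    // Each result element should be floor(-7 / 2) == -4.
    runtime->EnqueueWorkload(netId, inputs, outputs);
    return 0;
}
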