From f803f78f3f1c4ad86e4be3d6e7ce8d46a462f2e5 Mon Sep 17 00:00:00 2001
From: Bruno Goncalves
Date: Tue, 18 Dec 2018 13:40:30 -0200
Subject: Added ParseMul method to TfLiteParser

Change-Id: I72e61a5ece52d69b289a1559907e2fb3084bfa4a
---
 src/armnnTfLiteParser/TfLiteParser.cpp        |  29 +++++++
 src/armnnTfLiteParser/TfLiteParser.hpp        |   1 +
 src/armnnTfLiteParser/test/Multiplication.cpp | 112 ++++++++++++++++++++++++++
 3 files changed, 142 insertions(+)
 create mode 100644 src/armnnTfLiteParser/test/Multiplication.cpp

diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 156ff03d82..e5af76b234 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -451,6 +451,7 @@ TfLiteParser::TfLiteParser()
     m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
     m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
     m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
+    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
 }

 void TfLiteParser::ResetParser()
@@ -1020,6 +1021,34 @@ void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }

+void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsMulOptions();
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Mul:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
+
+    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);

diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 24fb421c25..42e1814e2e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -102,6 +102,7 @@ private:
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
     void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
+    void ParseMul(size_t subgraphIndex, size_t operatorIndex);

     void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);

diff --git a/src/armnnTfLiteParser/test/Multiplication.cpp b/src/armnnTfLiteParser/test/Multiplication.cpp
new file mode 100644
index 0000000000..802799c2b4
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Multiplication.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct MultiplicationFixture : public ParserFlatbuffersFixture
+{
+    explicit MultiplicationFixture(const std::string & inputShape1,
+                                   const std::string & inputShape2,
+                                   const std::string & outputShape,
+                                   const std::string & activation="NONE")
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "MUL" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape1 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor1",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + inputShape2 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "inputTensor2",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"( ,
+                            "type": "FLOAT32",
+                            "buffer": 2,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                    "inputs": [ 0, 1 ],
+                    "outputs": [ 2 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0, 1 ],
+                            "outputs": [ 2 ],
+                            "builtin_options_type": "MulOptions",
+                            "builtin_options": {
+                                "fused_activation_function": )" + activation + R"(
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct SimpleMultiplicationFixture : public MultiplicationFixture
+{
+    SimpleMultiplicationFixture() : MultiplicationFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture)
+{
+    RunTest<4, float>(0, {{ "inputTensor1", { 0.0f,  1.0f,  2.0f,
+                                              3.0f,  4.0f,  5.0f,
+                                              6.0f,  7.0f,  8.0f,
+                                              9.0f, 10.0f, 11.0f } },
+                          { "inputTensor2", { 1.0f, 1.0f, 1.0f,
+                                              5.0f, 5.0f, 5.0f,
+                                              1.0f, 1.0f, 1.0f,
+                                              5.0f, 5.0f, 5.0f } } },
+                       {{ "outputTensor", {  0.0f,  1.0f,  2.0f,
+                                            15.0f, 20.0f, 25.0f,
+                                             6.0f,  7.0f,  8.0f,
+                                            45.0f, 50.0f, 55.0f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
-- 
cgit v1.2.1
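
For context, the sketch below shows how a TensorFlow Lite model containing the newly supported MUL operator could be loaded through the parser's public API and prepared for execution on the reference backend. It is illustrative only and not part of the change: the model file name is a placeholder, and the calls (ITfLiteParser::Create, CreateNetworkFromBinaryFile, Optimize, LoadNetwork) follow the ArmNN public API as it stood around this release, so details may differ in other versions.

// Illustrative usage sketch -- not part of the patch. Assumes the ArmNN /
// armnnTfLiteParser public API of this era; "mul_model.tflite" is a placeholder.
#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    using namespace armnn;

    // Parse the .tflite flatbuffer; a MUL operator in the graph is now mapped
    // to a MultiplicationLayer (plus an optional fused activation layer).
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    INetworkPtr network = parser->CreateNetworkFromBinaryFile("mul_model.tflite");

    // Optimise the parsed graph for the reference backend and load it into a runtime.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef}, runtime->GetDeviceSpec());

    NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));

    // Input and output tensors would then be bound by name (for example via
    // GetNetworkInputBindingInfo) and executed with runtime->EnqueueWorkload(...).
    return 0;
}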