From 5f45027909bba9f4abeeef6d8a265ed345d564ae Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 12 Feb 2019 14:31:45 +0000
Subject: IVGCVSW-2640 Add Serializer & Deserializer for Mul

* Updated Serializer schema for Multiplication support
* Added support for Multiplication to Serializer and Deserializer

Change-Id: I10ad8ad4b37876a963ccdcf7074cb66f40531bde
Signed-off-by: Sadik Armagan
---
 src/armnnDeserializeParser/DeserializeParser.cpp   |  25 +++-
 src/armnnDeserializeParser/DeserializeParser.hpp   |   1 +
 src/armnnDeserializeParser/DeserializerSupport.md  |   1 +
 .../test/DeserializeMultiplication.cpp             | 161 +++++++++++++++++++++
 4 files changed, 187 insertions(+), 1 deletion(-)
 create mode 100644 src/armnnDeserializeParser/test/DeserializeMultiplication.cpp

(limited to 'src/armnnDeserializeParser')

diff --git a/src/armnnDeserializeParser/DeserializeParser.cpp b/src/armnnDeserializeParser/DeserializeParser.cpp
index 5ba92d51e2..0368ccabf2 100644
--- a/src/armnnDeserializeParser/DeserializeParser.cpp
+++ b/src/armnnDeserializeParser/DeserializeParser.cpp
@@ -132,7 +132,8 @@ DeserializeParser::DeserializeParser()
 m_ParserFunctions(Layer_MAX+1, &DeserializeParser::ParseUnsupportedLayer)
 {
     // register supported layers
-    m_ParserFunctions[Layer_AdditionLayer] = &DeserializeParser::ParseAdd;
+    m_ParserFunctions[Layer_AdditionLayer]       = &DeserializeParser::ParseAdd;
+    m_ParserFunctions[Layer_MultiplicationLayer] = &DeserializeParser::ParseMultiplication;
 }
 
 DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
@@ -145,6 +146,8 @@ DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
         case Layer::Layer_InputLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
+        case Layer::Layer_MultiplicationLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
         case Layer::Layer_OutputLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
         case Layer::Layer_NONE:
@@ -582,6 +585,26 @@ void DeserializeParser::ParseAdd(unsigned int layerIndex)
     RegisterOutputSlots(layerIndex, layer);
 }
 
+void DeserializeParser::ParseMultiplication(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_LOCATION();
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Multiplication:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
 }
diff --git a/src/armnnDeserializeParser/DeserializeParser.hpp b/src/armnnDeserializeParser/DeserializeParser.hpp
index 322826c762..9edd959220 100644
--- a/src/armnnDeserializeParser/DeserializeParser.hpp
+++ b/src/armnnDeserializeParser/DeserializeParser.hpp
@@ -64,6 +64,7 @@ private:
 
     void ParseUnsupportedLayer(unsigned int serializeGraphIndex);
     void ParseAdd(unsigned int serializeGraphIndex);
+    void ParseMultiplication(unsigned int serializeGraphIndex);
 
     void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
     void RegisterInputSlotOfConnection(uint32_t connectionIndex, armnn::IInputSlot* slot);
diff --git a/src/armnnDeserializeParser/DeserializerSupport.md b/src/armnnDeserializeParser/DeserializerSupport.md
index 7135003421..8e1433419e 100644
--- a/src/armnnDeserializeParser/DeserializerSupport.md
+++ b/src/armnnDeserializeParser/DeserializerSupport.md
@@ -7,5 +7,6 @@ This reference guide provides a list of layers which can be deserialized current
 The Arm NN SDK Deserialize parser currently supports the following layers:
 
 * Addition
+* Multiplication
 
 More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializeParser/test/DeserializeMultiplication.cpp b/src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
new file mode 100644
index 0000000000..f8eff1697a
--- /dev/null
+++ b/src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../DeserializeParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit MultiplicationFixture(const std::string & inputShape1,
+                                   const std::string & inputShape2,
+                                   const std::string & outputShape,
+                                   const std::string & dataType,
+                                   const std::string & activation="NONE")
+    {
+        m_JsonString = R"(
+        {
+            inputIds: [0, 1],
+            outputIds: [3],
+            layers: [
+            {
+                layer_type: "InputLayer",
+                layer: {
+                    base: {
+                        layerBindingId: 0,
+                        base: {
+                            index: 0,
+                            layerName: "InputLayer1",
+                            layerType: "Input",
+                            inputSlots: [{
+                                index: 0,
+                                connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                            }],
+                            outputSlots: [ {
+                                index: 0,
+                                tensorInfo: {
+                                    dimensions: )" + inputShape1 + R"(,
+                                    dataType: )" + dataType + R"(
+                                },
+                            }],
+                        },}},
+            },
+            {
+                layer_type: "InputLayer",
+                layer: {
+                    base: {
+                        layerBindingId: 1,
+                        base: {
+                            index:1,
+                            layerName: "InputLayer2",
+                            layerType: "Input",
+                            inputSlots: [{
+                                index: 0,
+                                connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                            }],
+                            outputSlots: [ {
+                                index: 0,
+                                tensorInfo: {
+                                    dimensions: )" + inputShape2 + R"(,
+                                    dataType: )" + dataType + R"(
+                                },
+                            }],
+                        },}},
+            },
+            {
+                layer_type: "MultiplicationLayer",
+                layer : {
+                    base: {
+                        index:2,
+                        layerName: "MultiplicationLayer",
+                        layerType: "Multiplication",
+                        inputSlots: [
+                        {
+                            index: 0,
+                            connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                        },
+                        {
+                            index: 1,
+                            connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                        }
+                        ],
+                        outputSlots: [ {
+                            index: 0,
+                            tensorInfo: {
+                                dimensions: )" + outputShape + R"(,
+                                dataType: )" + dataType + R"(
+                            },
+                        }],
+                    }},
+            },
+            {
+                layer_type: "OutputLayer",
+                layer: {
+                    base:{
+                        layerBindingId: 3,
+                        base: {
+                            index: 3,
+                            layerName: "OutputLayer",
+                            layerType: "Output",
+                            inputSlots: [{
+                                index: 0,
+                                connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+                            }],
+                            outputSlots: [ {
+                                index: 0,
+                                tensorInfo: {
+                                    dimensions: )" + outputShape + R"(,
+                                    dataType: )" + dataType + R"(
+                                },
+                            }],
+                        }}},
+            }]
+        }
+        )";
+        Setup();
+    }
+};
+
+
+struct SimpleMultiplicationFixture : MultiplicationFixture
+{
+    SimpleMultiplicationFixture() : MultiplicationFixture("[ 2, 2 ]",
+                                                          "[ 2, 2 ]",
+                                                          "[ 2, 2 ]",
+                                                          "QuantisedAsymm8") {}
+};
+
+struct SimpleMultiplicationFixture2 : MultiplicationFixture
+{
+    SimpleMultiplicationFixture2() : MultiplicationFixture("[ 2, 2, 1, 1 ]",
+                                                           "[ 2, 2, 1, 1 ]",
+                                                           "[ 2, 2, 1, 1 ]",
+                                                           "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
+{
+    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+        0,
+        {{"InputLayer1", { 0, 1, 2, 3 }},
+         {"InputLayer2", { 4, 5, 6, 7 }}},
+        {{"OutputLayer", { 0, 5, 12, 21 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(MultiplicationFloat32, SimpleMultiplicationFixture2)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer1", { 100, 40, 226, 9 }},
+         {"InputLayer2", { 5, 8, 1, 12 }}},
+        {{"OutputLayer", { 500, 320, 226, 108 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
-- 
cgit v1.2.1
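
For reference, the graph that the new ParseMultiplication path rebuilds from a serialized model (two inputs feeding one Multiplication layer and one output, as exercised by the MultiplicationFloat32 test above) corresponds to the following construction against the public armnn graph-building API. This is an illustrative sketch only, not part of the patch; it assumes the armnn::INetwork builder calls (INetwork::Create, AddInputLayer, AddMultiplicationLayer, AddOutputLayer) that the deserializer itself invokes, and the layer names/indices are taken from the JSON test fixture.

// Illustrative sketch (not part of the patch): the network that the
// deserializer reconstructs for the MultiplicationFloat32 test case.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Two bound inputs, one multiplication, one bound output,
    // mirroring the layer indices used in the JSON fixture.
    IConnectableLayer* input0 = network->AddInputLayer(0, "InputLayer1");
    IConnectableLayer* input1 = network->AddInputLayer(1, "InputLayer2");
    IConnectableLayer* mul    = network->AddMultiplicationLayer("Multiplication:2");
    IConnectableLayer* output = network->AddOutputLayer(3, "OutputLayer");

    // Every tensor in that test graph is 2x2x1x1 Float32.
    TensorInfo tensorInfo(TensorShape({ 2, 2, 1, 1 }), DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    mul->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // Connect each input to one slot of the multiplication layer,
    // then the multiplication result to the output layer.
    input0->GetOutputSlot(0).Connect(mul->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(mul->GetInputSlot(1));
    mul->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}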