author     Sadik Armagan <sadik.armagan@arm.com>          2019-02-12 14:31:45 +0000
committer  Saoirse Stewart Arm <saoirse.stewart@arm.com>  2019-02-13 09:10:35 +0000
commit     5f45027909bba9f4abeeef6d8a265ed345d564ae (patch)
tree       01bf69ba9db252a99b4dc6feccb20b08aabb7d70
parent     fb1437e86d8e01af9ee9cebe4c8cd9ff508ac779 (diff)
download   armnn-5f45027909bba9f4abeeef6d8a265ed345d564ae.tar.gz
IVGCVSW-2640 Add Serializer & Deserializer for Mul

 * Updated Serializer schema for Multiplication support
 * Added support for Multiplication to Serializer and Deserializer

Change-Id: I10ad8ad4b37876a963ccdcf7074cb66f40531bde
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
-rw-r--r--  CMakeLists.txt                                                  |   1
-rw-r--r--  src/armnnDeserializeParser/DeserializeParser.cpp                |  25
-rw-r--r--  src/armnnDeserializeParser/DeserializeParser.hpp                |   1
-rw-r--r--  src/armnnDeserializeParser/DeserializerSupport.md               |   1
-rw-r--r--  src/armnnDeserializeParser/test/DeserializeMultiplication.cpp   | 161
-rw-r--r--  src/armnnSerializer/Schema.fbs                                  |   8
-rw-r--r--  src/armnnSerializer/SeralizerSupport.md                         |   1
-rw-r--r--  src/armnnSerializer/Serializer.cpp                              |  14
-rw-r--r--  src/armnnSerializer/Serializer.hpp                              |   3
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp                    |  26
10 files changed, 239 insertions(+), 2 deletions(-)
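For context, the sketch below shows how the new Multiplication support might be exercised from the serializer side; it mirrors the SimpleNetworkWithMultiplicationSerialization test added in this patch. The include paths, the main() wrapper, and the std::ofstream step are illustrative assumptions; only the INetwork and Serializer calls are taken from the diff below.

// Minimal sketch (not part of this commit): serialize a network containing the
// new Multiplication layer. Include paths are assumed for illustration only.
#include <armnn/ArmNN.hpp>                  // assumed umbrella header for INetwork
#include "armnnSerializer/Serializer.hpp"   // assumed path to the Serializer used here

#include <fstream>
#include <sstream>

int main()
{
    // Build: two inputs -> Multiplication -> one output
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* input0 = network->AddInputLayer(0);
    armnn::IConnectableLayer* input1 = network->AddInputLayer(1);
    armnn::IConnectableLayer* mul    = network->AddMultiplicationLayer("mult_0");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(mul->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(mul->GetInputSlot(1));
    mul->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Serialize to a stream, as the new unit test does, then persist it.
    armnnSerializer::Serializer serializer;
    serializer.Serialize(*network);

    std::stringstream stream;
    serializer.SaveSerializedToStream(stream);

    std::ofstream file("network.armnn", std::ios::binary); // hypothetical output file
    file << stream.str();
    return 0;
}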
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 074350b0a3..2768f6a18b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -552,6 +552,7 @@ if(BUILD_UNIT_TESTS)
src/armnnSerializer/Schema_generated.h
src/armnnSerializer/test/SerializerTests.cpp
src/armnnDeserializeParser/test/DeserializeAdd.cpp
+ src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
src/armnnDeserializeParser/test/SchemaSerialize.s
)
diff --git a/src/armnnDeserializeParser/DeserializeParser.cpp b/src/armnnDeserializeParser/DeserializeParser.cpp
index 5ba92d51e2..0368ccabf2 100644
--- a/src/armnnDeserializeParser/DeserializeParser.cpp
+++ b/src/armnnDeserializeParser/DeserializeParser.cpp
@@ -132,7 +132,8 @@ DeserializeParser::DeserializeParser()
m_ParserFunctions(Layer_MAX+1, &DeserializeParser::ParseUnsupportedLayer)
{
// register supported layers
- m_ParserFunctions[Layer_AdditionLayer] = &DeserializeParser::ParseAdd;
+ m_ParserFunctions[Layer_AdditionLayer] = &DeserializeParser::ParseAdd;
+ m_ParserFunctions[Layer_MultiplicationLayer] = &DeserializeParser::ParseMultiplication;
}
DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
@@ -145,6 +146,8 @@ DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
case Layer::Layer_InputLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
+ case Layer::Layer_MultiplicationLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_MultiplicationLayer()->base();
case Layer::Layer_OutputLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
case Layer::Layer_NONE:
@@ -582,6 +585,26 @@ void DeserializeParser::ParseAdd(unsigned int layerIndex)
RegisterOutputSlots(layerIndex, layer);
}
+void DeserializeParser::ParseMultiplication(unsigned int layerIndex)
+{
+ CHECK_LAYERS(m_Graph, 0, layerIndex);
+ auto inputs = GetInputs(m_Graph, layerIndex);
+ CHECK_LOCATION();
+ CHECK_VALID_SIZE(inputs.size(), 2);
+
+ auto outputs = GetOutputs(m_Graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto layerName = boost::str(boost::format("Multiplication:%1%") % layerIndex);
+ IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
+
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ RegisterInputSlots(layerIndex, layer);
+ RegisterOutputSlots(layerIndex, layer);
+}
+
}
diff --git a/src/armnnDeserializeParser/DeserializeParser.hpp b/src/armnnDeserializeParser/DeserializeParser.hpp
index 322826c762..9edd959220 100644
--- a/src/armnnDeserializeParser/DeserializeParser.hpp
+++ b/src/armnnDeserializeParser/DeserializeParser.hpp
@@ -64,6 +64,7 @@ private:
void ParseUnsupportedLayer(unsigned int serializeGraphIndex);
void ParseAdd(unsigned int serializeGraphIndex);
+ void ParseMultiplication(unsigned int serializeGraphIndex);
void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
void RegisterInputSlotOfConnection(uint32_t connectionIndex, armnn::IInputSlot* slot);
diff --git a/src/armnnDeserializeParser/DeserializerSupport.md b/src/armnnDeserializeParser/DeserializerSupport.md
index 7135003421..8e1433419e 100644
--- a/src/armnnDeserializeParser/DeserializerSupport.md
+++ b/src/armnnDeserializeParser/DeserializerSupport.md
@@ -7,5 +7,6 @@ This reference guide provides a list of layers which can be deserialized current
The Arm NN SDK Deserialize parser currently supports the following layers:
* Addition
+* Multiplication
More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializeParser/test/DeserializeMultiplication.cpp b/src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
new file mode 100644
index 0000000000..f8eff1697a
--- /dev/null
+++ b/src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../DeserializeParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit MultiplicationFixture(const std::string & inputShape1,
+ const std::string & inputShape2,
+ const std::string & outputShape,
+ const std::string & dataType,
+ const std::string & activation="NONE")
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0, 1],
+ outputIds: [3],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer1",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },}},
+ },
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 1,
+ base: {
+ index:1,
+ layerName: "InputLayer2",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape2 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },}},
+ },
+ {
+ layer_type: "MultiplicationLayer",
+ layer : {
+ base: {
+ index:2,
+ layerName: "MultiplicationLayer",
+ layerType: "Multiplication",
+ inputSlots: [
+ {
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ },
+ {
+ index: 1,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }
+ ],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }},
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 3,
+ base: {
+ index: 3,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ Setup();
+ }
+};
+
+
+struct SimpleMultiplicationFixture : MultiplicationFixture
+{
+ SimpleMultiplicationFixture() : MultiplicationFixture("[ 2, 2 ]",
+ "[ 2, 2 ]",
+ "[ 2, 2 ]",
+ "QuantisedAsymm8") {}
+};
+
+struct SimpleMultiplicationFixture2 : MultiplicationFixture
+{
+ SimpleMultiplicationFixture2() : MultiplicationFixture("[ 2, 2, 1, 1 ]",
+ "[ 2, 2, 1, 1 ]",
+ "[ 2, 2, 1, 1 ]",
+ "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
+{
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"InputLayer1", { 0, 1, 2, 3 }},
+ {"InputLayer2", { 4, 5, 6, 7 }}},
+ {{"OutputLayer", { 0, 5, 12, 21 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(MultiplicationFloat32, SimpleMultiplicationFixture2)
+{
+ RunTest<4, armnn::DataType::Float32>(
+ 0,
+ {{"InputLayer1", { 100, 40, 226, 9 }},
+ {"InputLayer2", { 5, 8, 1, 12 }}},
+ {{"OutputLayer", { 500, 320, 226, 108 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
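For reference, the expected outputs in the two test cases above are simply the element-wise products of the inputs: { 0, 1, 2, 3 } x { 4, 5, 6, 7 } = { 0, 5, 12, 21 } for the QuantisedAsymm8 fixture, and { 100, 40, 226, 9 } x { 5, 8, 1, 12 } = { 500, 320, 226, 108 } for the Float32 fixture.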
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs
index 2527f6d0f6..5d6388d944 100644
--- a/src/armnnSerializer/Schema.fbs
+++ b/src/armnnSerializer/Schema.fbs
@@ -65,7 +65,8 @@ table OutputSlot {
enum LayerType : uint {
Addition = 0,
Input = 1,
- Output = 2
+ Multiplication = 2,
+ Output = 3
}
// Base layer table to be used as part of other layers
@@ -91,6 +92,10 @@ table InputLayer {
base:BindableLayerBase;
}
+table MultiplicationLayer {
+ base:LayerBase;
+}
+
table OutputLayer {
base:BindableLayerBase;
}
@@ -98,6 +103,7 @@ table OutputLayer {
union Layer {
AdditionLayer,
InputLayer,
+ MultiplicationLayer,
OutputLayer
}
diff --git a/src/armnnSerializer/SeralizerSupport.md b/src/armnnSerializer/SeralizerSupport.md
index 16d1940be0..5978c8a952 100644
--- a/src/armnnSerializer/SeralizerSupport.md
+++ b/src/armnnSerializer/SeralizerSupport.md
@@ -7,5 +7,6 @@ This reference guide provides a list of layers which can be serialized currently
The Arm NN SDK Serializer currently supports the following layers:
* Addition
+* Multiplication
More machine learning layers will be supported in future releases.
\ No newline at end of file
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 57baf0e28c..acb672ad1f 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -88,6 +88,20 @@ void Serializer::VisitAdditionLayer(const IConnectableLayer* layer, const char*
CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
}
+// Build FlatBuffer for Multiplication Layer
+void Serializer::VisitMultiplicationLayer(const IConnectableLayer* layer, const char* name)
+{
+ // Create FlatBuffer BaseLayer
+ auto flatBufferMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
+
+ // Create the FlatBuffer MultiplicationLayer
+ auto flatBufferMultiplicationLayer =
+ serializer::CreateMultiplicationLayer(m_flatBufferBuilder, flatBufferMultiplicationBaseLayer);
+
+ // Add the AnyLayer to the FlatBufferLayers
+ CreateAnyLayer(flatBufferMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
+}
+
void Serializer::Serialize(const INetwork& inNetwork)
{
// Iterate through to network
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 697e5cfaa7..8aec3ca42a 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -29,6 +29,9 @@ public:
armnn::LayerBindingId id,
const char* name = nullptr) override;
+ void VisitMultiplicationLayer(const armnn::IConnectableLayer* layer,
+ const char* name = nullptr) override;
+
/// Serializes the network to ArmNN SerializedGraph.
/// @param [in] inNetwork The network to be serialized.
void Serialize(const armnn::INetwork& inNetwork);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 17ad6e3695..ab4bc0fe0b 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -32,4 +32,30 @@ BOOST_AUTO_TEST_CASE(SimpleNetworkSerialization)
BOOST_TEST(stream.str().length() > 0);
}
+BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization)
+{
+ const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+
+ armnn::INetworkPtr network = armnn::INetwork::Create();
+ armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
+ armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
+
+ const char* multLayerName = "mult_0";
+
+ armnn::IConnectableLayer* const multiplicationLayer0 = network->AddMultiplicationLayer(multLayerName);
+ inputLayer0->GetOutputSlot(0).Connect(multiplicationLayer0->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).Connect(multiplicationLayer0->GetInputSlot(1));
+
+ armnn::IConnectableLayer* const outputLayer0 = network->AddOutputLayer(0);
+ multiplicationLayer0->GetOutputSlot(0).Connect(outputLayer0->GetInputSlot(0));
+
+ armnnSerializer::Serializer serializer;
+ serializer.Serialize(*network);
+
+ std::stringstream stream;
+ serializer.SaveSerializedToStream(stream);
+ BOOST_TEST(stream.str().length() > 0);
+ BOOST_TEST(stream.str().find(multLayerName) != stream.str().npos);
+}
+
BOOST_AUTO_TEST_SUITE_END()