author    Conor Kennedy <conor.kennedy@arm.com>          2019-03-01 14:37:12 +0000
committer Matteo Martincigh <matteo.martincigh@arm.com>  2019-03-04 11:24:12 +0000
commit    da1f975558a96499ea48cfca54e727a04b175271 (patch)
tree      e83ed0ac3be1bd404a85cc4d4d9cbaf18e444a67
parent    6522cdcd8b07aa8f423f832305eed57d79891e92 (diff)
download  armnn-da1f975558a96499ea48cfca54e727a04b175271.tar.gz
IVGCVSW-2711 Add Serializer and Deserializer for Subtraction
Change-Id: I87836b5314c1f791b4df2ca90d239573ca28a2da
Signed-off-by: Conor Kennedy <conor.kennedy@arm.com>
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
-rw-r--r--  CMakeLists.txt                                            1
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp                   23
-rw-r--r--  src/armnnDeserializer/Deserializer.hpp                    1
-rw-r--r--  src/armnnDeserializer/DeserializerSupport.md              1
-rw-r--r--  src/armnnDeserializer/test/DeserializeSubtraction.cpp   176
-rw-r--r--  src/armnnSerializer/ArmnnSchema.fbs                      10
-rw-r--r--  src/armnnSerializer/Serializer.cpp                        8
-rw-r--r--  src/armnnSerializer/Serializer.hpp                        3
-rw-r--r--  src/armnnSerializer/SerializerSupport.md                  1
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp             40
10 files changed, 262 insertions(+), 2 deletions(-)
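For context, the sketch below shows how an application might round-trip a network containing the newly supported Subtraction layer. The network-building calls mirror the SerializeDeserializeSubtraction test added in this patch; the serializer/deserializer entry points (armnnSerializer::ISerializer::Create/Serialize/SaveSerializedToStream and armnnDeserializer::IDeserializer::Create/CreateNetworkFromBinary) and the header paths are assumptions about the public API, not part of this change.

    // Minimal sketch under the assumptions stated above: build a network with a
    // Subtraction layer, serialize it to an in-memory stream, and load it back.
    #include <armnn/INetwork.hpp>
    #include <armnnSerializer/ISerializer.hpp>
    #include <armnnDeserializer/IDeserializer.hpp>

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    int main()
    {
        // Two 1x4 Float32 inputs feeding a Subtraction layer, as in SerializerTests.cpp.
        const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);

        armnn::INetworkPtr network = armnn::INetwork::Create();
        armnn::IConnectableLayer* input0 = network->AddInputLayer(0);
        armnn::IConnectableLayer* input1 = network->AddInputLayer(1);
        armnn::IConnectableLayer* sub    = network->AddSubtractionLayer("subtraction");
        armnn::IConnectableLayer* output = network->AddOutputLayer(0);

        input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
        sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input0->GetOutputSlot(0).SetTensorInfo(info);
        input1->GetOutputSlot(0).SetTensorInfo(info);
        sub->GetOutputSlot(0).SetTensorInfo(info);

        // Serialize to a FlatBuffers blob (assumed ISerializer usage).
        auto serializer = armnnSerializer::ISerializer::Create();
        serializer->Serialize(*network);
        std::stringstream stream;
        serializer->SaveSerializedToStream(stream);

        // Deserialize the blob back into an INetwork (assumed IDeserializer usage).
        const std::string data = stream.str();
        const std::vector<uint8_t> binary(data.begin(), data.end());
        armnn::INetworkPtr restored =
            armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(binary);

        return restored ? 0 : 1;
    }
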
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 62f618a46d..60604f84dc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -615,6 +615,7 @@ if(BUILD_UNIT_TESTS)
src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
src/armnnDeserializer/test/DeserializeRsqrt.cpp
src/armnnDeserializer/test/DeserializeSpaceToBatchNd.cpp
+ src/armnnDeserializer/test/DeserializeSubtraction.cpp
src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
src/armnnDeserializer/test/SchemaSerialize.s
)
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index aa7454339e..73c2042024 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -209,6 +209,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
m_ParserFunctions[Layer_RsqrtLayer] = &Deserializer::ParseRsqrt;
m_ParserFunctions[Layer_SoftmaxLayer] = &Deserializer::ParseSoftmax;
m_ParserFunctions[Layer_SpaceToBatchNdLayer] = &Deserializer::ParseSpaceToBatchNd;
+ m_ParserFunctions[Layer_SubtractionLayer] = &Deserializer::ParseSubtraction;
}
Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
@@ -269,6 +270,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_SoftmaxLayer()->base();
case Layer::Layer_SpaceToBatchNdLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_SpaceToBatchNdLayer()->base();
+ case Layer::Layer_SubtractionLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_SubtractionLayer()->base();
case Layer::Layer_NONE:
default:
throw ParseException(boost::str(
@@ -1639,4 +1642,24 @@ void Deserializer::ParseRsqrt(GraphPtr graph, unsigned int layerIndex)
RegisterOutputSlots(graph, layerIndex, layer);
}
+void Deserializer::ParseSubtraction(GraphPtr graph, unsigned int layerIndex)
+{
+ CHECK_LAYERS(graph, 0, layerIndex);
+ auto inputs = GetInputs(graph, layerIndex);
+ CHECK_LOCATION();
+ CHECK_VALID_SIZE(inputs.size(), 2);
+
+ auto outputs = GetOutputs(graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto layerName = GetLayerName(graph, layerIndex);
+ IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
+
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ RegisterInputSlots(graph, layerIndex, layer);
+ RegisterOutputSlots(graph, layerIndex, layer);
+}
+
} // namespace armnnDeserializer
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index cabc91f58d..7595818628 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -95,6 +95,7 @@ private:
void ParseRsqrt(GraphPtr graph, unsigned int layerIndex);
void ParseSoftmax(GraphPtr graph, unsigned int layerIndex);
void ParseSpaceToBatchNd(GraphPtr graph, unsigned int layerIndex);
+ void ParseSubtraction(GraphPtr graph, unsigned int layerIndex);
void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
void RegisterInputSlotOfConnection(uint32_t connectionIndex, armnn::IInputSlot* slot);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 0a1ef75260..42da558738 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -30,5 +30,6 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
* Rsqrt
* Softmax
* SpaceToBatchNd
+* Subtraction
More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializer/test/DeserializeSubtraction.cpp b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
new file mode 100644
index 0000000000..5058bb840d
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
@@ -0,0 +1,176 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct SubtractionFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit SubtractionFixture(const std::string & inputShape1,
+ const std::string & inputShape2,
+ const std::string & outputShape,
+ const std::string & dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0, 1],
+ outputIds: [3],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "inputLayer1",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape1 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },
+ }},
+ },
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 1,
+ base: {
+ index:1,
+ layerName: "inputLayer2",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape2 + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ },
+ }},
+ },
+ {
+ layer_type: "SubtractionLayer",
+ layer : {
+ base: {
+ index:2,
+ layerName: "subtractionLayer",
+ layerType: "Subtraction",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ },
+ {
+ index: 1,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }},
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 3,
+ layerName: "outputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }}},
+ }]
+ }
+ )";
+ Setup();
+ }
+};
+
+struct SimpleSubtractionFixture : SubtractionFixture
+{
+ SimpleSubtractionFixture() : SubtractionFixture("[ 1, 4 ]",
+ "[ 1, 4 ]",
+ "[ 1, 4 ]",
+ "QuantisedAsymm8") {}
+};
+
+struct SimpleSubtractionFixture2 : SubtractionFixture
+{
+ SimpleSubtractionFixture2() : SubtractionFixture("[ 1, 4 ]",
+ "[ 1, 4 ]",
+ "[ 1, 4 ]",
+ "Float32") {}
+};
+
+struct SimpleSubtractionFixtureBroadcast : SubtractionFixture
+{
+ SimpleSubtractionFixtureBroadcast() : SubtractionFixture("[ 1, 4 ]",
+ "[ 1, 1 ]",
+ "[ 1, 4 ]",
+ "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SubtractionQuantisedAsymm8, SimpleSubtractionFixture)
+{
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputLayer1", { 4, 5, 6, 7 }},
+ {"inputLayer2", { 3, 2, 1, 0 }}},
+ {{"outputLayer", { 1, 3, 5, 7 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(SubtractionFloat32, SimpleSubtractionFixture2)
+{
+ RunTest<2, armnn::DataType::Float32>(
+ 0,
+ {{"inputLayer1", { 4, 5, 6, 7 }},
+ {"inputLayer2", { 3, 2, 1, 0 }}},
+ {{"outputLayer", { 1, 3, 5, 7 }}});
+}
+
+BOOST_FIXTURE_TEST_CASE(SubtractionBroadcast, SimpleSubtractionFixtureBroadcast)
+{
+ RunTest<2, armnn::DataType::Float32>(
+ 0,
+ {{"inputLayer1", { 4, 5, 6, 7 }},
+ {"inputLayer2", { 2 }}},
+ {{"outputLayer", { 2, 3, 4, 5 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index dea5889186..9f1d8ecc9f 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -106,7 +106,8 @@ enum LayerType : uint {
Floor = 22,
BatchNormalization = 23,
Greater = 24,
- ResizeBilinear = 25
+ ResizeBilinear = 25,
+ Subtraction = 26
}
// Base layer table to be used as part of other layers
@@ -302,6 +303,10 @@ table SpaceToBatchNdDescriptor {
dataLayout:DataLayout;
}
+table SubtractionLayer {
+ base:LayerBase;
+}
+
table BatchToSpaceNdLayer {
base:LayerBase;
descriptor:BatchToSpaceNdDescriptor;
@@ -402,7 +407,8 @@ union Layer {
RsqrtLayer,
FloorLayer,
GreaterLayer,
- ResizeBilinearLayer
+ ResizeBilinearLayer,
+ SubtractionLayer
}
table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index d6df61cbcf..93231d0366 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -588,6 +588,14 @@ void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer*
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
}
+void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
+{
+ auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
+ auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
+
+ CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
+}
+
fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
const serializer::LayerType layerType)
{
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index bbc67d49da..b6f5974265 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -146,6 +146,9 @@ public:
void VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
const armnn::NormalizationDescriptor& normalizationDescriptor,
const char* name = nullptr) override;
+
+ void VisitSubtractionLayer(const armnn::IConnectableLayer* layer,
+ const char* name = nullptr) override;
private:
/// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 6071c9c4e8..141a6faa45 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -30,5 +30,6 @@ The Arm NN SDK Serializer currently supports the following layers:
* Rsqrt
* Softmax
* SpaceToBatchNd
+* Subtraction
More machine learning layers will be supported in future releases.
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index ad6676edc7..572ee80011 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1250,4 +1250,44 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeResizeBilinear)
{outputTensorInfo.GetShape()});
}
+BOOST_AUTO_TEST_CASE(SerializeDeserializeSubtraction)
+{
+ class VerifySubtractionName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ {
+ public:
+ void VisitSubtractionLayer(const armnn::IConnectableLayer*, const char* name) override
+ {
+ BOOST_TEST(name == "subtraction");
+ }
+ };
+
+ const armnn::TensorInfo info = armnn::TensorInfo({ 1, 4 }, armnn::DataType::Float32);
+
+ armnn::INetworkPtr network = armnn::INetwork::Create();
+ armnn::IConnectableLayer* const inputLayer0 = network->AddInputLayer(0);
+ armnn::IConnectableLayer* const inputLayer1 = network->AddInputLayer(1);
+ armnn::IConnectableLayer* const subtractionLayer = network->AddSubtractionLayer("subtraction");
+ armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+ inputLayer0->GetOutputSlot(0).Connect(subtractionLayer->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).Connect(subtractionLayer->GetInputSlot(1));
+ subtractionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ inputLayer0->GetOutputSlot(0).SetTensorInfo(info);
+ inputLayer1->GetOutputSlot(0).SetTensorInfo(info);
+ subtractionLayer->GetOutputSlot(0).SetTensorInfo(info);
+
+ armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+ BOOST_CHECK(deserializedNetwork);
+
+ VerifySubtractionName nameChecker;
+ deserializedNetwork->Accept(nameChecker);
+
+ CheckDeserializedNetworkAgainstOriginal(*network,
+ *deserializedNetwork,
+ {info.GetShape(), info.GetShape()},
+ {info.GetShape()},
+ {0, 1});
+}
+
BOOST_AUTO_TEST_SUITE_END()