author    ruoyan01 <ruomei.yan@arm.com>    2019-02-28 15:09:07 +0000
committer ruoyan01 <ruomei.yan@arm.com>    2019-03-01 16:39:27 +0000
commit    8e7fa232b4e637cc02f2ca344b2113c63cdc7e5a (patch)
tree      3c200afe3c7cab37b553ba0461aed4410b7cfbb8 /src/armnnDeserializer
parent    dd2ba7ebf78a75aadd8ddd2ae1a4226ffc4ae4d9 (diff)
download  armnn-8e7fa232b4e637cc02f2ca344b2113c63cdc7e5a.tar.gz
IVGCVSW-2681 Serialize/de-serialize the BatchNormalization layer
Change-Id: I418c4465366742262fb6e6c1eeba76c634beaeb5
Signed-off-by: ruoyan01 <ruomei.yan@arm.com>
Diffstat (limited to 'src/armnnDeserializer')
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp                         |  40
-rw-r--r--  src/armnnDeserializer/Deserializer.hpp                         |   1
-rw-r--r--  src/armnnDeserializer/DeserializerSupport.md                   |   1
-rw-r--r--  src/armnnDeserializer/test/DeserializeBatchNormalization.cpp   | 172
4 files changed, 214 insertions, 0 deletions
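
For context, the sketch below shows how a consumer could load a serialized model once this change is in place. It is illustrative only and assumes the public armnnDeserializer::IDeserializer interface of this release; the helper name LoadNetwork and the file handling are not part of the patch.

    // Illustrative sketch (not part of this patch): deserializing a network
    // that may contain a BatchNormalization layer, assuming the public
    // armnnDeserializer::IDeserializer API of this release.
    #include <armnn/INetwork.hpp>
    #include <armnnDeserializer/IDeserializer.hpp>

    #include <cstdint>
    #include <fstream>
    #include <iterator>
    #include <string>
    #include <vector>

    armnn::INetworkPtr LoadNetwork(const std::string& path)
    {
        // Read the FlatBuffers binary produced by the armnn Serializer.
        std::ifstream file(path, std::ios::binary);
        std::vector<uint8_t> content((std::istreambuf_iterator<char>(file)),
                                     std::istreambuf_iterator<char>());

        // BatchNormalization layers in 'content' are now handled by the
        // Deserializer::ParseBatchNormalization function added below.
        auto deserializer = armnnDeserializer::IDeserializer::Create();
        return deserializer->CreateNetworkFromBinary(content);
    }
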
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 77bd7498ee..e8cda2e3d3 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -188,6 +188,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
m_ParserFunctions[Layer_ActivationLayer] = &Deserializer::ParseActivation;
m_ParserFunctions[Layer_AdditionLayer] = &Deserializer::ParseAdd;
m_ParserFunctions[Layer_BatchToSpaceNdLayer] = &Deserializer::ParseBatchToSpaceNd;
+ m_ParserFunctions[Layer_BatchNormalizationLayer] = &Deserializer::ParseBatchNormalization;
m_ParserFunctions[Layer_ConstantLayer] = &Deserializer::ParseConstant;
m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d;
m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d;
@@ -220,6 +221,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
case Layer::Layer_BatchToSpaceNdLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_BatchToSpaceNdLayer()->base();
+ case Layer::Layer_BatchNormalizationLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer()->base();
case Layer::Layer_ConstantLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_ConstantLayer()->base();
case Layer::Layer_Convolution2dLayer:
@@ -848,6 +851,43 @@ void Deserializer::ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex)
RegisterOutputSlots(graph, layerIndex, layer);
}
+void Deserializer::ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex)
+{
+ CHECK_LAYERS(graph, 0, layerIndex);
+
+ auto inputs = GetInputs(graph, layerIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ auto outputs = GetOutputs(graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+ auto outputInfo = ToTensorInfo(outputs[0]);
+
+ auto layerName = boost::str(boost::format("BatchNormalization:%1%") % layerIndex);
+
+ auto serializerLayer = graph->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer();
+ auto serializerDescriptor = serializerLayer->descriptor();
+
+ armnn::BatchNormalizationDescriptor descriptor;
+ descriptor.m_Eps = serializerDescriptor->eps();
+ descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout());
+
+ armnn::ConstTensor mean = ToConstTensor(serializerLayer->mean());
+ armnn::ConstTensor variance = ToConstTensor(serializerLayer->variance());
+ armnn::ConstTensor beta = ToConstTensor(serializerLayer->beta());
+ armnn::ConstTensor gamma = ToConstTensor(serializerLayer->gamma());
+
+ IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(descriptor,
+ mean,
+ variance,
+ beta,
+ gamma,
+ layerName.c_str());
+ layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+ RegisterInputSlots(graph, layerIndex, layer);
+ RegisterOutputSlots(graph, layerIndex, layer);
+}
+
void Deserializer::ParseConstant(GraphPtr graph, unsigned int layerIndex)
{
CHECK_LAYERS(graph, 0, layerIndex);
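
For reference, ParseBatchNormalization above rebuilds the layer that a user originally added with armnn::INetwork::AddBatchNormalizationLayer before serialization. A rough sketch of that construction side follows; it is not taken from this patch, and the shapes and constant values are placeholders chosen to mirror the test added further down.

    // Rough illustration only: the kind of network ParseBatchNormalization
    // reconstructs. Shapes and constant values are placeholders.
    #include <armnn/INetwork.hpp>

    #include <vector>

    armnn::INetworkPtr BuildBatchNormNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        TensorInfo inputInfo(TensorShape({1, 3, 3, 1}), DataType::Float32);
        TensorInfo paramInfo(TensorShape({1}), DataType::Float32);

        std::vector<float> mean     = {5.0f};
        std::vector<float> variance = {2.0f};
        std::vector<float> beta     = {0.0f};
        std::vector<float> gamma    = {1.0f};

        BatchNormalizationDescriptor descriptor;
        descriptor.m_Eps        = 0.001f;
        descriptor.m_DataLayout = DataLayout::NHWC;

        IConnectableLayer* input     = network->AddInputLayer(0);
        IConnectableLayer* batchNorm = network->AddBatchNormalizationLayer(
            descriptor,
            ConstTensor(paramInfo, mean),
            ConstTensor(paramInfo, variance),
            ConstTensor(paramInfo, beta),
            ConstTensor(paramInfo, gamma),
            "batchNorm");
        IConnectableLayer* output    = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
        batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        batchNorm->GetOutputSlot(0).SetTensorInfo(inputInfo);

        return network;
    }
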
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index aa5035ed31..237cb9f975 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -74,6 +74,7 @@ private:
void ParseActivation(GraphPtr graph, unsigned int layerIndex);
void ParseAdd(GraphPtr graph, unsigned int layerIndex);
void ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex);
+ void ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex);
void ParseConstant(GraphPtr graph, unsigned int layerIndex);
void ParseConvolution2d(GraphPtr graph, unsigned int layerIndex);
void ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 765432b8e4..b5e9b6b0a2 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -9,6 +9,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
* Activation
* Addition
* BatchToSpaceNd
+* BatchNormalization
* Constant
* Convolution2d
* DepthwiseConvolution2d
diff --git a/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp b/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
new file mode 100644
index 0000000000..3e1be6cf72
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
@@ -0,0 +1,172 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct BatchNormalizationFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit BatchNormalizationFixture(const std::string &inputShape,
+ const std::string &outputShape,
+ const std::string &meanShape,
+ const std::string &varianceShape,
+ const std::string &offsetShape,
+ const std::string &scaleShape,
+ const std::string &dataType,
+ const std::string &dataLayout)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: ")" + dataType + R"(",
+ quantizationScale: 0.5,
+ quantizationOffset: 0
+ },
+ }]
+ },
+ }
+ },
+ },
+ {
+ layer_type: "BatchNormalizationLayer",
+ layer : {
+ base: {
+ index:1,
+ layerName: "BatchNormalizationLayer",
+ layerType: "BatchNormalization",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: ")" + dataType + R"("
+ },
+ }],
+ },
+ descriptor: {
+ eps: 0.0010000000475,
+ dataLayout: ")" + dataLayout + R"("
+ },
+ mean: {
+ info: {
+ dimensions: )" + meanShape + R"(,
+ dataType: ")" + dataType + R"("
+ },
+ data_type: IntData,
+ data: {
+ data: [1084227584],
+ }
+ },
+ variance: {
+ info: {
+ dimensions: )" + varianceShape + R"(,
+ dataType: ")" + dataType + R"("
+ },
+ data_type: IntData,
+ data: {
+ data: [1073741824],
+ }
+ },
+ beta: {
+ info: {
+ dimensions: )" + offsetShape + R"(,
+ dataType: ")" + dataType + R"("
+ },
+ data_type: IntData,
+ data: {
+ data: [0],
+ }
+ },
+ gamma: {
+ info: {
+ dimensions: )" + scaleShape + R"(,
+ dataType: ")" + dataType + R"("
+ },
+ data_type: IntData,
+ data: {
+ data: [1065353216],
+ }
+ },
+ },
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 0,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [ {
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: ")" + dataType + R"("
+ },
+ }],
+ }
+ }},
+ }]
+ }
+)";
+ Setup();
+ }
+};
+
+struct BatchNormFixture : BatchNormalizationFixture
+{
+ BatchNormFixture():BatchNormalizationFixture("[ 1, 3, 3, 1 ]",
+ "[ 1, 3, 3, 1 ]",
+ "[ 1 ]",
+ "[ 1 ]",
+ "[ 1 ]",
+ "[ 1 ]",
+ "Float32",
+ "NHWC"){}
+};
+
+BOOST_FIXTURE_TEST_CASE(BatchNormalizationFloat32, BatchNormFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(0,
+ {{"InputLayer", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }}},
+ {{"OutputLayer",{ -2.8277204f, -2.12079024f, -1.4138602f,
+ -0.7069301f, 0.0f, 0.7069301f,
+ 1.4138602f, 2.12079024f, 2.8277204f }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
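
A note on the constant data in the fixture above: each IntData word is the raw IEEE-754 bit pattern of a float (1084227584 = 5.0f for mean, 1073741824 = 2.0f for variance, 0 = 0.0f for beta, 1065353216 = 1.0f for gamma), and the expected outputs follow the usual batch normalization formula y = gamma * (x - mean) / sqrt(variance + eps) + beta with eps = 0.001f. A small stand-alone check, illustrative only:

    // Stand-alone check of the expected values in BatchNormalizationFloat32.
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float mean     = 5.0f;   // IntData 1084227584 == 0x40A00000
        const float variance = 2.0f;   // IntData 1073741824 == 0x40000000
        const float beta     = 0.0f;   // IntData 0
        const float gamma    = 1.0f;   // IntData 1065353216 == 0x3F800000
        const float eps      = 0.001f; // descriptor eps

        for (int i = 1; i <= 9; ++i)
        {
            const float x = static_cast<float>(i);
            const float y = gamma * (x - mean) / std::sqrt(variance + eps) + beta;
            std::printf("%f\n", y); // -2.827720, -2.120790, ..., 2.827720
        }
        return 0;
    }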