From 8e7fa232b4e637cc02f2ca344b2113c63cdc7e5a Mon Sep 17 00:00:00 2001 From: ruoyan01 Date: Thu, 28 Feb 2019 15:09:07 +0000 Subject: IVGCVSW-2681 Serialize/de-serialize the BatchNormalization layer Change-Id: I418c4465366742262fb6e6c1eeba76c634beaeb5 Signed-off-by: ruoyan01 --- CMakeLists.txt | 1 + src/armnnDeserializer/Deserializer.cpp | 40 +++++ src/armnnDeserializer/Deserializer.hpp | 1 + src/armnnDeserializer/DeserializerSupport.md | 1 + .../test/DeserializeBatchNormalization.cpp | 172 +++++++++++++++++++++ src/armnnSerializer/ArmnnSchema.fbs | 19 ++- src/armnnSerializer/Serializer.cpp | 29 ++++ src/armnnSerializer/Serializer.hpp | 8 + src/armnnSerializer/SerializerSupport.md | 1 + src/armnnSerializer/test/SerializerTests.cpp | 68 ++++++++ 10 files changed, 339 insertions(+), 1 deletion(-) create mode 100644 src/armnnDeserializer/test/DeserializeBatchNormalization.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index dd30cdb19d..0c8a7b906a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -598,6 +598,7 @@ if(BUILD_UNIT_TESTS) src/armnnDeserializer/test/DeserializeActivation.cpp src/armnnDeserializer/test/DeserializeAdd.cpp src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp + src/armnnDeserializer/test/DeserializeBatchNormalization.cpp src/armnnDeserializer/test/DeserializeConstant.cpp src/armnnDeserializer/test/DeserializeConvolution2d.cpp src/armnnDeserializer/test/DeserializeDivision.cpp diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 77bd7498ee..e8cda2e3d3 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -188,6 +188,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer) m_ParserFunctions[Layer_ActivationLayer] = &Deserializer::ParseActivation; m_ParserFunctions[Layer_AdditionLayer] = &Deserializer::ParseAdd; m_ParserFunctions[Layer_BatchToSpaceNdLayer] = &Deserializer::ParseBatchToSpaceNd; + m_ParserFunctions[Layer_BatchNormalizationLayer] = &Deserializer::ParseBatchNormalization; m_ParserFunctions[Layer_ConstantLayer] = &Deserializer::ParseConstant; m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d; m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d; @@ -220,6 +221,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base(); case Layer::Layer_BatchToSpaceNdLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_BatchToSpaceNdLayer()->base(); + case Layer::Layer_BatchNormalizationLayer: + return graphPtr->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer()->base(); case Layer::Layer_ConstantLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_ConstantLayer()->base(); case Layer::Layer_Convolution2dLayer: @@ -848,6 +851,43 @@ void Deserializer::ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex) RegisterOutputSlots(graph, layerIndex, layer); } +void Deserializer::ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex) +{ + CHECK_LAYERS(graph, 0, layerIndex); + + auto inputs = GetInputs(graph, layerIndex); + CHECK_VALID_SIZE(inputs.size(), 1); + + auto outputs = GetOutputs(graph, layerIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + auto outputInfo = ToTensorInfo(outputs[0]); + + auto layerName = boost::str(boost::format("BatchNormalization:%1%") % layerIndex); + + auto serializerLayer = 
graph->layers()->Get(layerIndex)->layer_as_BatchNormalizationLayer(); + auto serializerDescriptor = serializerLayer->descriptor(); + + armnn::BatchNormalizationDescriptor descriptor; + descriptor.m_Eps = serializerDescriptor->eps(); + descriptor.m_DataLayout = ToDataLayout(serializerDescriptor->dataLayout()); + + armnn::ConstTensor mean = ToConstTensor(serializerLayer->mean()); + armnn::ConstTensor variance = ToConstTensor(serializerLayer->variance()); + armnn::ConstTensor beta = ToConstTensor(serializerLayer->beta()); + armnn::ConstTensor gamma = ToConstTensor(serializerLayer->gamma()); + + IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(descriptor, + mean, + variance, + beta, + gamma, + layerName.c_str()); + layer->GetOutputSlot(0).SetTensorInfo(outputInfo); + + RegisterInputSlots(graph, layerIndex, layer); + RegisterOutputSlots(graph, layerIndex, layer); +} + void Deserializer::ParseConstant(GraphPtr graph, unsigned int layerIndex) { CHECK_LAYERS(graph, 0, layerIndex); diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp index aa5035ed31..237cb9f975 100644 --- a/src/armnnDeserializer/Deserializer.hpp +++ b/src/armnnDeserializer/Deserializer.hpp @@ -74,6 +74,7 @@ private: void ParseActivation(GraphPtr graph, unsigned int layerIndex); void ParseAdd(GraphPtr graph, unsigned int layerIndex); void ParseBatchToSpaceNd(GraphPtr graph, unsigned int layerIndex); + void ParseBatchNormalization(GraphPtr graph, unsigned int layerIndex); void ParseConstant(GraphPtr graph, unsigned int layerIndex); void ParseConvolution2d(GraphPtr graph, unsigned int layerIndex); void ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex); diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md index 765432b8e4..b5e9b6b0a2 100644 --- a/src/armnnDeserializer/DeserializerSupport.md +++ b/src/armnnDeserializer/DeserializerSupport.md @@ -9,6 +9,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers: * Activation * Addition * BatchToSpaceNd +* BatchNormalization * Constant * Convolution2d * DepthwiseConvolution2d diff --git a/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp b/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp new file mode 100644 index 0000000000..3e1be6cf72 --- /dev/null +++ b/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp @@ -0,0 +1,172 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct BatchNormalizationFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit BatchNormalizationFixture(const std::string &inputShape,
+                                       const std::string &outputShape,
+                                       const std::string &meanShape,
+                                       const std::string &varianceShape,
+                                       const std::string &offsetShape,
+                                       const std::string &scaleShape,
+                                       const std::string &dataType,
+                                       const std::string &dataLayout)
+    {
+        m_JsonString = R"(
+    {
+        inputIds: [0],
+        outputIds: [2],
+        layers: [
+           {
+            layer_type: "InputLayer",
+            layer: {
+                base: {
+                    layerBindingId: 0,
+                    base: {
+                        index: 0,
+                        layerName: "InputLayer",
+                        layerType: "Input",
+                        inputSlots: [{
+                            index: 0,
+                            connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                        }],
+                        outputSlots: [{
+                            index: 0,
+                            tensorInfo: {
+                                dimensions: )" + inputShape + R"(,
+                                dataType: ")" + dataType + R"(",
+                                quantizationScale: 0.5,
+                                quantizationOffset: 0
+                                },
+                            }]
+                        },
+                    }
+                },
+            },
+        {
+        layer_type: "BatchNormalizationLayer",
+        layer : {
+            base: {
+                index:1,
+                layerName: "BatchNormalizationLayer",
+                layerType: "BatchNormalization",
+                inputSlots: [{
+                    index: 0,
+                    connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                }],
+                outputSlots: [{
+                    index: 0,
+                    tensorInfo: {
+                        dimensions: )" + outputShape + R"(,
+                        dataType: ")" + dataType + R"("
+                    },
+                }],
+            },
+            descriptor: {
+                eps: 0.0010000000475,
+                dataLayout: ")" + dataLayout + R"("
+            },
+            mean: {
+                info: {
+                         dimensions: )" + meanShape + R"(,
+                         dataType: ")" + dataType + R"("
+                     },
+                data_type: IntData,
+                data: {
+                    data: [1084227584],
+                    }
+                },
+            variance: {
+                info: {
+                         dimensions: )" + varianceShape + R"(,
+                         dataType: ")" + dataType + R"("
+                     },
+                data_type: IntData,
+                data: {
+                    data: [1073741824],
+                    }
+                },
+            beta: {
+                info: {
+                         dimensions: )" + offsetShape + R"(,
+                         dataType: ")" + dataType + R"("
+                     },
+                data_type: IntData,
+                data: {
+                    data: [0],
+                    }
+                },
+            gamma: {
+                info: {
+                         dimensions: )" + scaleShape + R"(,
+                         dataType: ")" + dataType + R"("
+                     },
+                data_type: IntData,
+                data: {
+                    data: [1065353216],
+                    }
+                },
+            },
+        },
+        {
+        layer_type: "OutputLayer",
+        layer: {
+            base:{
+                layerBindingId: 0,
+                base: {
+                    index: 2,
+                    layerName: "OutputLayer",
+                    layerType: "Output",
+                    inputSlots: [{
+                        index: 0,
+                        connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                    }],
+                    outputSlots: [ {
+                        index: 0,
+                        tensorInfo: {
+                            dimensions: )" + outputShape + R"(,
+                            dataType: ")" + dataType + R"("
+                        },
+                }],
+            }
+        }},
+        }]
+    }
+)";
+        Setup();
+    }
+};
+
+struct BatchNormFixture : BatchNormalizationFixture
+{
+    BatchNormFixture():BatchNormalizationFixture("[ 1, 3, 3, 1 ]",
+                                                 "[ 1, 3, 3, 1 ]",
+                                                 "[ 1 ]",
+                                                 "[ 1 ]",
+                                                 "[ 1 ]",
+                                                 "[ 1 ]",
+                                                 "Float32",
+                                                 "NHWC"){}
+};
+
+BOOST_FIXTURE_TEST_CASE(BatchNormalizationFloat32, BatchNormFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0,
+                                         {{"InputLayer", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }}},
+                                         {{"OutputLayer",{ -2.8277204f, -2.12079024f, -1.4138602f,
+                                                           -0.7069301f, 0.0f, 0.7069301f,
+                                                           1.4138602f, 2.12079024f, 2.8277204f }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 4e3180f0ff..f416912618 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -103,7 +103,8 @@ enum LayerType : uint {
     Normalization = 19,
     Pad = 20,
     Rsqrt = 21,
-    Floor = 22
+    Floor = 22,
+    BatchNormalization = 23
 }
 
 // Base layer table to 
be used as part of other layers @@ -340,14 +341,30 @@ table PadDescriptor { padList:[uint]; } + table RsqrtLayer { base:LayerBase; } +table BatchNormalizationLayer { + base:LayerBase; + descriptor:BatchNormalizationDescriptor; + mean:ConstTensor; + variance:ConstTensor; + beta:ConstTensor; + gamma:ConstTensor; +} + +table BatchNormalizationDescriptor { + eps:float; + dataLayout:DataLayout; +} + union Layer { ActivationLayer, AdditionLayer, BatchToSpaceNdLayer, + BatchNormalizationLayer, ConstantLayer, Convolution2dLayer, DepthwiseConvolution2dLayer, diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index d40cdfa591..423706ceb3 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -170,6 +170,35 @@ void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer); } +void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer, + const armnn::BatchNormalizationDescriptor& batchNormDescriptor, + const armnn::ConstTensor& mean, + const armnn::ConstTensor& variance, + const armnn::ConstTensor& beta, + const armnn::ConstTensor& gamma, + const char* name) +{ + auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization); + auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor( + m_flatBufferBuilder, + batchNormDescriptor.m_Eps, + GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout)); + + auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean); + auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance); + auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta); + auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma); + auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder, + fbBatchNormalizationBaseLayer, + fbBatchNormalizationDescriptor, + fbMeanConstTensorInfo, + fbVarianceConstTensorInfo, + fbBetaConstTensorInfo, + fbGammaConstTensorInfo); + + CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer); +} + // Build FlatBuffer for Constant Layer void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer, const armnn::ConstTensor& input, diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp index bf599b1a1f..a60d19b860 100644 --- a/src/armnnSerializer/Serializer.hpp +++ b/src/armnnSerializer/Serializer.hpp @@ -53,6 +53,14 @@ public: const armnn::BatchToSpaceNdDescriptor& descriptor, const char* name = nullptr) override; + void VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer, + const armnn::BatchNormalizationDescriptor& BatchNormalizationDescriptor, + const armnn::ConstTensor& mean, + const armnn::ConstTensor& variance, + const armnn::ConstTensor& beta, + const armnn::ConstTensor& gamma, + const char* name = nullptr) override; + void VisitConstantLayer(const armnn::IConnectableLayer* layer, const armnn::ConstTensor& input, const char* = nullptr) override; diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md index b5a322f525..98023a6771 100644 --- a/src/armnnSerializer/SerializerSupport.md +++ b/src/armnnSerializer/SerializerSupport.md @@ -9,6 +9,7 @@ The Arm NN SDK Serializer currently supports the following layers: * Activation * Addition * BatchToSpaceNd +* BatchNormalization * Constant * Convolution2d * 
DepthwiseConvolution2d
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 5f1745bda5..3ef15831b1 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -888,6 +888,74 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeBatchToSpaceNd)
                                             {outputTensorInfo.GetShape()});
 }
 
+BOOST_AUTO_TEST_CASE(SerializeDeserializeBatchNormalization)
+{
+    class VerifyBatchNormalizationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    {
+    public:
+        void VisitBatchNormalizationLayer(const armnn::IConnectableLayer*,
+                                          const armnn::BatchNormalizationDescriptor&,
+                                          const armnn::ConstTensor&,
+                                          const armnn::ConstTensor&,
+                                          const armnn::ConstTensor&,
+                                          const armnn::ConstTensor&,
+                                          const char* name) override
+        {
+            BOOST_TEST(name == "BatchNormalization:1");
+        }
+    };
+
+    armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+
+    armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32);
+    armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32);
+    armnn::TensorInfo scaleInfo({1}, armnn::DataType::Float32);
+    armnn::TensorInfo offsetInfo({1}, armnn::DataType::Float32);
+
+    armnn::BatchNormalizationDescriptor descriptor;
+    descriptor.m_Eps = 0.0010000000475f;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    std::vector<float> meanData({5.0});
+    std::vector<float> varianceData({2.0});
+    std::vector<float> scaleData({1.0});
+    std::vector<float> offsetData({0.0});
+
+    armnn::ConstTensor mean(meanInfo, meanData);
+    armnn::ConstTensor variance(varianceInfo, varianceData);
+    armnn::ConstTensor scale(scaleInfo, scaleData);
+    armnn::ConstTensor offset(offsetInfo, offsetData);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const batchNormalizationLayer = network->AddBatchNormalizationLayer(
+        descriptor,
+        mean,
+        variance,
+        scale,
+        offset,
+        "BatchNormalizationLayer");
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    batchNormalizationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    batchNormalizationLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyBatchNormalizationName nameChecker;
+    deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            {inputInfo.GetShape()},
+                                            {outputInfo.GetShape()});
+}
+
 BOOST_AUTO_TEST_CASE(SerializeDivision)
 {
     class VerifyDivisionName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
-- 
cgit v1.2.1
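
Usage note: the sketch below shows how the round trip added by this patch might be driven through the public armnnSerializer/armnnDeserializer interfaces instead of the SerializeNetwork/DeserializeNetwork test helpers. The network construction mirrors the new unit tests; the ISerializer/IDeserializer calls (Serialize, SaveSerializedToStream, CreateNetworkFromBinary), the "batchNorm" layer name and the main() harness are assumptions based on the public headers of this period, not something exercised by the diff itself.

// Hedged sketch, not part of the patch: build a network with a
// BatchNormalization layer, serialize it, and deserialize it again.
#include <armnn/ArmNN.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
#include <armnnSerializer/ISerializer.hpp>

#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    // Shapes match the new unit tests: 1x3x3x1 NHWC input, single-channel parameters.
    armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
    armnn::TensorInfo paramInfo ({ 1 }, armnn::DataType::Float32);

    std::vector<float> meanData    ({ 5.0f });
    std::vector<float> varianceData({ 2.0f });
    std::vector<float> betaData    ({ 0.0f }); // offset
    std::vector<float> gammaData   ({ 1.0f }); // scale

    armnn::BatchNormalizationDescriptor descriptor;
    descriptor.m_Eps        = 0.0010000000475f;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Build input -> batch normalization -> output.
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* input     = network->AddInputLayer(0);
    armnn::IConnectableLayer* batchNorm = network->AddBatchNormalizationLayer(
        descriptor,
        armnn::ConstTensor(paramInfo, meanData),
        armnn::ConstTensor(paramInfo, varianceData),
        armnn::ConstTensor(paramInfo, betaData),
        armnn::ConstTensor(paramInfo, gammaData),
        "batchNorm"); // hypothetical layer name
    armnn::IConnectableLayer* output    = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    batchNorm->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Serialize to an in-memory buffer. SerializerVisitor::VisitBatchNormalizationLayer
    // (added by this patch) writes the descriptor plus the mean/variance/beta/gamma
    // const tensors into the flatbuffer.
    auto serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(*network);
    std::stringstream stream;
    serializer->SaveSerializedToStream(stream);

    // Deserialize; Deserializer::ParseBatchNormalization (also added by this patch)
    // rebuilds the layer and names it "BatchNormalization:<layerIndex>".
    std::string data = stream.str();
    std::vector<std::uint8_t> binary(data.begin(), data.end());
    armnn::INetworkPtr restored =
        armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(binary);

    return restored ? 0 : 1;
}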