From 6522cdcd8b07aa8f423f832305eed57d79891e92 Mon Sep 17 00:00:00 2001
From: Nattapat Chaimanowong
Date: Fri, 1 Mar 2019 16:14:13 +0000
Subject: IVGCVSW-2705 Add Serializer and Deserializer for ResizeBilinear

Change-Id: Ibc5689a2e00d38dc98ef39e50ed5dc3b91791e16
Signed-off-by: Nattapat Chaimanowong
---
 CMakeLists.txt                                |   1 +
 src/armnnDeserializer/Deserializer.cpp        |  30 +++++
 src/armnnDeserializer/Deserializer.hpp        |   1 +
 src/armnnDeserializer/DeserializerSupport.md  |   1 +
 .../test/DeserializeResizeBilinear.cpp        | 131 +++++++++++++++++++++
 src/armnnSerializer/ArmnnSchema.fbs           |  17 ++-
 src/armnnSerializer/Serializer.cpp            |  19 +++
 src/armnnSerializer/Serializer.hpp            |   4 +
 src/armnnSerializer/SerializerSupport.md      |   1 +
 src/armnnSerializer/test/SerializerTests.cpp  |  42 +++++++
 10 files changed, 245 insertions(+), 2 deletions(-)
 create mode 100644 src/armnnDeserializer/test/DeserializeResizeBilinear.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f3ad333f96..62f618a46d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -612,6 +612,7 @@ if(BUILD_UNIT_TESTS)
         src/armnnDeserializer/test/DeserializePermute.cpp
         src/armnnDeserializer/test/DeserializePooling2d.cpp
         src/armnnDeserializer/test/DeserializeReshape.cpp
+        src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
         src/armnnDeserializer/test/DeserializeRsqrt.cpp
         src/armnnDeserializer/test/DeserializeSpaceToBatchNd.cpp
         src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index aebdf0e52c..aa7454339e 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -205,6 +205,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
     m_ParserFunctions[Layer_PermuteLayer] = &Deserializer::ParsePermute;
     m_ParserFunctions[Layer_Pooling2dLayer] = &Deserializer::ParsePooling2d;
     m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
+    m_ParserFunctions[Layer_ResizeBilinearLayer] = &Deserializer::ParseResizeBilinear;
     m_ParserFunctions[Layer_RsqrtLayer] = &Deserializer::ParseRsqrt;
     m_ParserFunctions[Layer_SoftmaxLayer] = &Deserializer::ParseSoftmax;
     m_ParserFunctions[Layer_SpaceToBatchNdLayer] = &Deserializer::ParseSpaceToBatchNd;
@@ -260,6 +261,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->base();
         case Layer::Layer_ReshapeLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
+        case Layer::Layer_ResizeBilinearLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_ResizeBilinearLayer()->base();
         case Layer::Layer_RsqrtLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_RsqrtLayer()->base();
         case Layer::Layer_SoftmaxLayer:
@@ -1431,6 +1434,33 @@ void Deserializer::ParseReshape(GraphPtr graph, unsigned int layerIndex)
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+
+    Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_ResizeBilinearLayer()->descriptor();
+
+    armnn::ResizeBilinearDescriptor descriptor;
+    descriptor.m_TargetWidth = flatBufferDescriptor->targetWidth();
+    descriptor.m_TargetHeight = flatBufferDescriptor->targetHeight();
+    descriptor.m_DataLayout = ToDataLayout(flatBufferDescriptor->dataLayout());
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(descriptor, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseSoftmax(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 7e25534763..cabc91f58d 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -91,6 +91,7 @@ private:
     void ParsePermute(GraphPtr graph, unsigned int layerIndex);
     void ParsePooling2d(GraphPtr graph, unsigned int layerIndex);
     void ParseReshape(GraphPtr graph, unsigned int layerIndex);
+    void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex);
     void ParseRsqrt(GraphPtr graph, unsigned int layerIndex);
     void ParseSoftmax(GraphPtr graph, unsigned int layerIndex);
     void ParseSpaceToBatchNd(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index ba85a04bb2..0a1ef75260 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -26,6 +26,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
 * Permute
 * Pooling2d
 * Reshape
+* ResizeBilinear
 * Rsqrt
 * Softmax
 * SpaceToBatchNd
diff --git a/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp b/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
new file mode 100644
index 0000000000..b01832499e
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
@@ -0,0 +1,131 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct ResizeBilinearFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit ResizeBilinearFixture(const std::string& inputShape,
+                                   const std::string& targetWidth,
+                                   const std::string& targetHeight,
+                                   const std::string& dataLayout,
+                                   const std::string& outputShape,
+                                   const std::string& dataType)
+    {
+        m_JsonString = R"(
+            {
+                inputIds: [0],
+                outputIds: [2],
+                layers: [
+                    {
+                        layer_type: "InputLayer",
+                        layer: {
+                            base: {
+                                layerBindingId: 0,
+                                base: {
+                                    index: 0,
+                                    layerName: "InputLayer",
+                                    layerType: "Input",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [{
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + inputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        }
+                                    }]
+                                }
+                            }
+                        }
+                    },
+                    {
+                        layer_type: "ResizeBilinearLayer",
+                        layer: {
+                            base: {
+                                index: 1,
+                                layerName: "ResizeBilinearLayer",
+                                layerType: "ResizeBilinear",
+                                inputSlots: [{
+                                    index: 0,
+                                    connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                }],
+                                outputSlots: [{
+                                    index: 0,
+                                    tensorInfo: {
+                                        dimensions: )" + outputShape + R"(,
+                                        dataType: )" + dataType + R"(
+                                    }
+                                }]
+                            },
+                            descriptor: {
+                                targetWidth: )" + targetWidth + R"(,
+                                targetHeight: )" + targetHeight + R"(,
+                                dataLayout: )" + dataLayout + R"(,
+                            }
+                        }
+                    },
+                    {
+                        layer_type: "OutputLayer",
+                        layer: {
+                            base:{
+                                layerBindingId: 2,
+                                base: {
+                                    index: 2,
+                                    layerName: "OutputLayer",
+                                    layerType: "Output",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [{
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + outputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        },
+                                    }],
+                                }
+                            }
+                        },
+                    }
+                ]
+            }
+        )";
+        SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+    }
+};
+
+struct SimpleResizeBilinearFixture : ResizeBilinearFixture
+{
+    SimpleResizeBilinearFixture() : ResizeBilinearFixture("[1, 2, 2, 2]",
+                                                          "1",
+                                                          "1",
+                                                          "NCHW",
+                                                          "[1, 2, 1, 1]",
+                                                          "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SimpleResizeBilinearFloat32, SimpleResizeBilinearFixture)
+{
+    RunTest<4, armnn::DataType::Float32>(0,
+                                         {
+                                             1.0f, 255.0f, 200.0f, 250.0f,
+                                             250.0f, 200.0f, 250.0f, 1.0f
+                                         },
+                                         {
+                                             1.0f, 250.0f
+                                         });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 410849ec8b..dea5889186 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -105,7 +105,8 @@ enum LayerType : uint {
     Rsqrt = 21,
     Floor = 22,
     BatchNormalization = 23,
-    Greater = 24
+    Greater = 24,
+    ResizeBilinear = 25
 }
 
 // Base layer table to be used as part of other layers
@@ -364,6 +365,17 @@ table BatchNormalizationDescriptor {
     dataLayout:DataLayout;
 }
 
+table ResizeBilinearLayer {
+    base:LayerBase;
+    descriptor:ResizeBilinearDescriptor;
+}
+
+table ResizeBilinearDescriptor {
+    targetWidth:uint;
+    targetHeight:uint;
+    dataLayout:DataLayout;
+}
+
 union Layer {
     ActivationLayer,
     AdditionLayer,
@@ -389,7 +401,8 @@ union Layer {
     PadLayer,
     RsqrtLayer,
     FloorLayer,
-    GreaterLayer
+    GreaterLayer,
+    ResizeBilinearLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index b55adb266d..d6df61cbcf 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -424,6 +424,25 @@ void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
     CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
 }
 
+void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+                                                 const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+                                                 const char* name)
+{
+    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
+
+    auto flatBufferDescriptor =
+        CreateResizeBilinearDescriptor(m_flatBufferBuilder,
+                                       resizeDescriptor.m_TargetWidth,
+                                       resizeDescriptor.m_TargetHeight,
+                                       GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout));
+
+    auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
+                                                                 flatBufferBaseLayer,
+                                                                 flatBufferDescriptor);
+
+    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
+}
+
 void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
 {
     auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 164db19e6a..bbc67d49da 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -128,6 +128,10 @@ public:
                            const armnn::ReshapeDescriptor& reshapeDescriptor,
                            const char* name = nullptr) override;
 
+    void VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
+                                  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
+                                  const char* name = nullptr) override;
+
     void VisitRsqrtLayer(const armnn::IConnectableLayer* layer,
                          const char* name = nullptr) override;
 
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index f18ef3af68..6071c9c4e8 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -26,6 +26,7 @@ The Arm NN SDK Serializer currently supports the following layers:
 * Permute
 * Pooling2d
 * Reshape
+* ResizeBilinear
 * Rsqrt
 * Softmax
 * SpaceToBatchNd
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 7206d6dc53..ad6676edc7 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1208,4 +1208,46 @@ BOOST_AUTO_TEST_CASE(SerializeRsqrt)
                                             {tensorInfo.GetShape()});
 }
 
+BOOST_AUTO_TEST_CASE(SerializeDeserializeResizeBilinear)
+{
+    class VerifyResizeBilinearName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    {
+    public:
+        void VisitResizeBilinearLayer(const armnn::IConnectableLayer*,
+                                      const armnn::ResizeBilinearDescriptor& descriptor,
+                                      const char* name) override
+        {
+            BOOST_TEST(name == "ResizeBilinearLayer");
+        }
+    };
+
+    armnn::ResizeBilinearDescriptor desc;
+    desc.m_TargetWidth = 4;
+    desc.m_TargetHeight = 2;
+
+    const armnn::TensorInfo inputTensorInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const resizeLayer = network->AddResizeBilinearLayer(desc, "ResizeBilinearLayer");
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(resizeLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    resizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    resizeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    VerifyResizeBilinearName nameChecker;
+    deserializedNetwork->Accept(nameChecker);
+
+    CheckDeserializedNetworkAgainstOriginal(*network,
+                                            *deserializedNetwork,
+                                            {inputTensorInfo.GetShape()},
+                                            {outputTensorInfo.GetShape()});
+}
+
 BOOST_AUTO_TEST_SUITE_END()
-- 
cgit v1.2.1
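
Editor's note: for readers who want to exercise the new support end to end, the sketch below shows one way an application might round-trip a network containing a ResizeBilinear layer through the serializer and deserializer added by this patch. It is not part of the patch itself; the shapes, layer names and the use of a std::stringstream are illustrative, and the ISerializer/IDeserializer calls follow the public Arm NN API of this era as used by the patch's own test helpers.

    // Illustrative sketch only (not part of the patch above): build a small
    // Input -> ResizeBilinear -> Output network, serialize it to an in-memory
    // buffer, then deserialize it back into an INetwork.
    #include <armnn/INetwork.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <armnnSerializer/ISerializer.hpp>
    #include <armnnDeserializer/IDeserializer.hpp>

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    int main()
    {
        // Build the network to be serialized. Shapes are made up for the example.
        armnn::INetworkPtr network = armnn::INetwork::Create();

        armnn::ResizeBilinearDescriptor descriptor;
        descriptor.m_TargetWidth  = 4;
        descriptor.m_TargetHeight = 2;

        armnn::IConnectableLayer* input  = network->AddInputLayer(0);
        armnn::IConnectableLayer* resize = network->AddResizeBilinearLayer(descriptor, "resize");
        armnn::IConnectableLayer* output = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(resize->GetInputSlot(0));
        resize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32));
        resize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32));

        // Serialize the network into an in-memory buffer.
        armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
        serializer->Serialize(*network);

        std::stringstream stream;
        serializer->SaveSerializedToStream(stream);

        // Deserialize the buffer back into a new INetwork.
        const std::string data = stream.str();
        const std::vector<uint8_t> binary(data.begin(), data.end());

        armnn::INetworkPtr restored =
            armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(binary);

        return restored ? 0 : 1;
    }

This is essentially the pattern that SerializerTests.cpp wraps in its SerializeNetwork and DeserializeNetwork helpers before checking the deserialized graph against the original.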