From 51982472bfedf12e7d82cde6614617f94b2c86d0 Mon Sep 17 00:00:00 2001
From: Ellen Norris-Thompson
Date: Wed, 19 Jun 2019 11:46:21 +0100
Subject: IVGCVSW-3269 Add Serialization support for the new Prelu Activation layer

* Adds serialization/deserialization support
* Adds related unit test

Signed-off-by: Ellen Norris-Thompson
Change-Id: I600322b03e51f443cbcd9262bb27e36e5fd95ae5
---
 src/armnnDeserializer/Deserializer.cpp       | 23 +++++++++++++++
 src/armnnDeserializer/Deserializer.hpp       |  1 +
 src/armnnDeserializer/DeserializerSupport.md |  1 +
 src/armnnSerializer/ArmnnSchema.fbs          | 10 +++++--
 src/armnnSerializer/Serializer.cpp           |  9 +++++-
 src/armnnSerializer/SerializerSupport.md     |  1 +
 src/armnnSerializer/test/SerializerTests.cpp | 43 ++++++++++++++++++++++++++++
 7 files changed, 85 insertions(+), 3 deletions(-)

diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 68f3e8d566..b23ed97b4c 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -213,6 +213,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
     m_ParserFunctions[Layer_PadLayer] = &Deserializer::ParsePad;
     m_ParserFunctions[Layer_PermuteLayer] = &Deserializer::ParsePermute;
     m_ParserFunctions[Layer_Pooling2dLayer] = &Deserializer::ParsePooling2d;
+    m_ParserFunctions[Layer_PreluLayer] = &Deserializer::ParsePrelu;
     m_ParserFunctions[Layer_QuantizeLayer] = &Deserializer::ParseQuantize;
     m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
     m_ParserFunctions[Layer_ResizeBilinearLayer] = &Deserializer::ParseResizeBilinear;
@@ -292,6 +293,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_PermuteLayer()->base();
         case Layer::Layer_Pooling2dLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_Pooling2dLayer()->base();
+        case Layer::Layer_PreluLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_PreluLayer()->base();
         case Layer::Layer_QuantizeLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizeLayer()->base();
         case Layer::Layer_ReshapeLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
@@ -2194,4 +2197,24 @@ void Deserializer::ParseSwitch(GraphPtr graph, unsigned int layerIndex)
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParsePrelu(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_LOCATION();
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddPreluLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 } // namespace armnnDeserializer
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 20ab8c2bba..bef13b1c08 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -103,6 +103,7 @@ private:
     void ParsePad(GraphPtr graph, unsigned int layerIndex);
     void ParsePermute(GraphPtr graph, unsigned int layerIndex);
     void ParsePooling2d(GraphPtr graph, unsigned int layerIndex);
+    void ParsePrelu(GraphPtr graph, unsigned int layerIndex);
     void ParseQuantize(GraphPtr graph, unsigned int layerIndex);
     void ParseReshape(GraphPtr graph, unsigned int layerIndex);
     void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index 94c26fc808..b64045903a 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -33,6 +33,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
 * Pad
 * Permute
 * Pooling2d
+* Prelu
 * Quantize
 * Reshape
 * ResizeBilinear
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 1204ad8f0f..db5672f948 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -122,7 +122,8 @@ enum LayerType : uint {
     Merge = 37,
     Switch = 38,
     Concat = 39,
-    SpaceToDepth = 40
+    SpaceToDepth = 40,
+    Prelu = 41
 }
 
 // Base layer table to be used as part of other layers
@@ -555,6 +556,10 @@ table SwitchLayer {
     base:LayerBase;
 }
 
+table PreluLayer {
+    base:LayerBase;
+}
+
 union Layer {
     ActivationLayer,
     AdditionLayer,
@@ -596,7 +601,8 @@ union Layer {
     MergeLayer,
     SwitchLayer,
     ConcatLayer,
-    SpaceToDepthLayer
+    SpaceToDepthLayer,
+    PreluLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 012ed666f1..81231e4eba 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -712,7 +712,14 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye
 void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
                                         const char* name)
 {
-    throw UnimplementedException("SerializerVisitor::VisitPreluLayer not yet implemented");
+    // Create FlatBuffer BaseLayer
+    auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
+
+    // Create the FlatBuffer PreluLayer
+    auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
+
+    // Add the AnyLayer to the FlatBufferLayers
+    CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
 }
 
 void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 865cdf0610..e19eb32639 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -33,6 +33,7 @@ The Arm NN SDK Serializer currently supports the following layers:
 * Pad
 * Permute
 * Pooling2d
+* Prelu
 * Quantize
 * Reshape
 * ResizeBilinear
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index a757e16436..812a4780f4 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1499,6 +1499,49 @@ BOOST_AUTO_TEST_CASE(SerializeMultiplication)
     deserializedNetwork->Accept(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializePrelu)
+{
+    class PreluLayerVerifier : public LayerVerifierBase
+    {
+    public:
+        PreluLayerVerifier(const std::string& layerName,
+                           const std::vector<armnn::TensorInfo>& inputInfos,
+                           const std::vector<armnn::TensorInfo>& outputInfos)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
+
+        void VisitPreluLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        {
+            VerifyNameAndConnections(layer, name);
+        }
+    };
+
+    const std::string layerName("prelu");
+
+    armnn::TensorInfo inputTensorInfo ({ 4, 1, 2 }, armnn::DataType::Float32);
+    armnn::TensorInfo alphaTensorInfo ({ 5, 4, 3, 1 }, armnn::DataType::Float32);
+    armnn::TensorInfo outputTensorInfo({ 5, 4, 3, 2 }, armnn::DataType::Float32);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const alphaLayer  = network->AddInputLayer(1);
+    armnn::IConnectableLayer* const preluLayer  = network->AddPreluLayer(layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(preluLayer->GetInputSlot(0));
+    alphaLayer->GetOutputSlot(0).Connect(preluLayer->GetInputSlot(1));
+    preluLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    alphaLayer->GetOutputSlot(0).SetTensorInfo(alphaTensorInfo);
+    preluLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    PreluLayerVerifier verifier(layerName, {inputTensorInfo, alphaTensorInfo}, {outputTensorInfo});
+    deserializedNetwork->Accept(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeNormalization)
 {
     class NormalizationLayerVerifier : public LayerVerifierBase
--
cgit v1.2.1
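Note on usage (not part of the patch): the SerializePrelu test above round-trips the network through the test helpers SerializeNetwork and DeserializeNetwork. Below is a minimal standalone sketch of the same round trip through the public armnnSerializer::ISerializer and armnnDeserializer::IDeserializer interfaces; the tensor shapes simply mirror the unit test, and the in-memory stream handling is illustrative rather than taken from this change.

// Sketch: round-trip a network containing a Prelu layer through the serializer
// and deserializer touched by this patch. Shapes mirror the SerializePrelu test.
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>
#include <armnnSerializer/ISerializer.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
#include <cstdint>
#include <sstream>
#include <vector>

int main()
{
    // Build the same topology as the unit test: input and alpha both feed the Prelu layer.
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* input  = network->AddInputLayer(0);
    armnn::IConnectableLayer* alpha  = network->AddInputLayer(1);
    armnn::IConnectableLayer* prelu  = network->AddPreluLayer("prelu");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(prelu->GetInputSlot(0));
    alpha->GetOutputSlot(0).Connect(prelu->GetInputSlot(1));
    prelu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 4, 1, 2 },    armnn::DataType::Float32));
    alpha->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 5, 4, 3, 1 }, armnn::DataType::Float32));
    prelu->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 5, 4, 3, 2 }, armnn::DataType::Float32));

    // Serialize to FlatBuffers and capture the bytes in memory.
    auto serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(*network);
    std::stringstream stream;
    serializer->SaveSerializedToStream(stream);

    // Deserialize the bytes back into an INetwork; a null result would mean the
    // PreluLayer record was not recognised.
    std::string bytes = stream.str();
    std::vector<uint8_t> binary(bytes.begin(), bytes.end());
    auto deserializer = armnnDeserializer::IDeserializer::Create();
    armnn::INetworkPtr restored = deserializer->CreateNetworkFromBinary(binary);
    return restored ? 0 : 1;
}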