From 2605b236d103e1ba27069e0d668599042a4761af Mon Sep 17 00:00:00 2001 From: Finn Williams Date: Wed, 10 Jun 2020 15:53:46 +0100 Subject: IVGCVSW-4624 Add a RANK Reference Implementation * Add Rank front end * Add Rank reference implementation * Add Rank serialization support * Add Scalar serialization support Signed-off-by: Finn Williams Change-Id: I06e4a468c2a84e79bae2e6c5348596bbbf853b4b --- src/armnnDeserializer/Deserializer.cpp | 35 +++++- src/armnnDeserializer/Deserializer.hpp | 3 +- src/armnnDeserializer/DeserializerSupport.md | 1 + src/armnnDeserializer/test/DeserializeRank.cpp | 151 +++++++++++++++++++++++++ 4 files changed, 188 insertions(+), 2 deletions(-) create mode 100644 src/armnnDeserializer/test/DeserializeRank.cpp (limited to 'src/armnnDeserializer') diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 31fae2af86..7143cdbdcc 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -225,6 +225,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer) m_ParserFunctions[Layer_QLstmLayer] = &Deserializer::ParseQLstm; m_ParserFunctions[Layer_QuantizeLayer] = &Deserializer::ParseQuantize; m_ParserFunctions[Layer_QuantizedLstmLayer] = &Deserializer::ParseQuantizedLstm; + m_ParserFunctions[Layer_RankLayer] = &Deserializer::ParseRank; m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape; m_ParserFunctions[Layer_ResizeBilinearLayer] = &Deserializer::ParseResizeBilinear; m_ParserFunctions[Layer_ResizeLayer] = &Deserializer::ParseResize; @@ -331,6 +332,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizeLayer()->base(); case Layer::Layer_QuantizedLstmLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base(); + case Layer::Layer_RankLayer: + return graphPtr->layers()->Get(layerIndex)->layer_as_RankLayer()->base(); case Layer::Layer_ReshapeLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base(); case Layer::Layer_ResizeBilinearLayer: @@ -545,6 +548,16 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr) } } + if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::Scalar)) + { + float quantizationScale = tensorPtr->quantizationScale(); + int32_t quantizationOffset = tensorPtr->quantizationOffset(); + + return armnn::TensorInfo(armnn::TensorShape{armnn::Dimensionality::Scalar}, + type, + quantizationScale, + quantizationOffset); + } auto dimensions = tensorPtr->dimensions(); unsigned int size = dimensions->size(); @@ -2008,6 +2021,26 @@ armnn::TensorInfo Deserializer::OutputShapeOfReshape(const armnn::TensorInfo& in return reshapeInfo; } +void Deserializer::ParseRank(GraphPtr graph, unsigned int layerIndex) +{ + CHECK_LAYERS(graph, 0, layerIndex); + + Deserializer::TensorRawPtrVector inputs = GetInputs(graph, 
layerIndex); + CHECK_VALID_SIZE(inputs.size(), 1); + + Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto layerName = GetLayerName(graph, layerIndex); + IConnectableLayer* layer = m_Network->AddRankLayer( layerName.c_str()); + + armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + RegisterInputSlots(graph, layerIndex, layer); + RegisterOutputSlots(graph, layerIndex, layer); +} + void Deserializer::ParseReshape(GraphPtr graph, unsigned int layerIndex) { CHECK_LAYERS(graph, 0, layerIndex); diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp index 69868c210a..4a1bdad8a8 100644 --- a/src/armnnDeserializer/Deserializer.hpp +++ b/src/armnnDeserializer/Deserializer.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -118,6 +118,7 @@ private: void ParsePrelu(GraphPtr graph, unsigned int layerIndex); void ParseQLstm(GraphPtr graph, unsigned int layerIndex); void ParseQuantize(GraphPtr graph, unsigned int layerIndex); + void ParseRank(GraphPtr graph, unsigned int layerIndex); void ParseReshape(GraphPtr graph, unsigned int layerIndex); void ParseResize(GraphPtr graph, unsigned int layerIndex); void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex); diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md index b4982ec78a..4e2ead41bc 100644 --- a/src/armnnDeserializer/DeserializerSupport.md +++ b/src/armnnDeserializer/DeserializerSupport.md @@ -45,6 +45,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers: * Quantize * QLstm * QuantizedLstm +* Rank * Reshape * Resize * ResizeBilinear diff --git a/src/armnnDeserializer/test/DeserializeRank.cpp 
b/src/armnnDeserializer/test/DeserializeRank.cpp new file mode 100644 index 0000000000..8f14af150b --- /dev/null +++ b/src/armnnDeserializer/test/DeserializeRank.cpp @@ -0,0 +1,151 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include <boost/test/unit_test.hpp> +#include "ParserFlatbuffersSerializeFixture.hpp" +#include "../Deserializer.hpp" + +#include <string> + +BOOST_AUTO_TEST_SUITE(Deserializer) + +struct RankFixture : public ParserFlatbuffersSerializeFixture +{ + explicit RankFixture(const std::string &inputShape, + const std::string &dataType) + { + m_JsonString = R"( + { + inputIds: [0], + outputIds: [2], + layers: [ + { + layer_type: "InputLayer", + layer: { + base: { + base: { + layerName: "", + layerType: "Input", + inputSlots: [ + + ], + outputSlots: [ + { + tensorInfo: { + dimensions: )" + inputShape + R"(, + dataType: )" + dataType + R"(, + quantizationScale: 0.0 + } + } + ] + } + } + } + }, + { + layer_type: "RankLayer", + layer: { + base: { + index: 1, + layerName: "rank", + layerType: "Rank", + inputSlots: [ + { + connection: { + sourceLayerIndex: 0, + outputSlotIndex: 0 + } + } + ], + outputSlots: [ + { + tensorInfo: { + dimensions: [ 1 ], + dataType: "Signed32", + quantizationScale: 0.0, + dimensionality: 2 + } + } + ] + } + } + }, + { + layer_type: "OutputLayer", + layer: { + base: { + base: { + index: 2, + layerName: "", + layerType: "Output", + inputSlots: [ + { + connection: { + sourceLayerIndex: 1, + outputSlotIndex: 0 + } + } + ], + outputSlots: [] + } + } + } + } + ], + } + )"; + Setup(); + } +}; + +struct SimpleRankDimSize1Fixture : RankFixture +{ + SimpleRankDimSize1Fixture() : RankFixture("[ 8 ]", "QSymmS16") {} +}; + +struct SimpleRankDimSize2Fixture : RankFixture +{ + SimpleRankDimSize2Fixture() : RankFixture("[ 3, 3 ]", "QSymmS8") {} +}; + +struct SimpleRankDimSize3Fixture : RankFixture +{ + SimpleRankDimSize3Fixture() : RankFixture("[ 2, 2, 1 ]", "Signed32") {} +}; + +struct 
SimpleRankDimSize4Fixture : RankFixture +{ + SimpleRankDimSize4Fixture() : RankFixture("[ 2, 2, 1, 1 ]", "Float32") {} +}; + +BOOST_FIXTURE_TEST_CASE(RankDimSize1QSymmS16, SimpleRankDimSize1Fixture) +{ + RunTest<1, armnn::DataType::QSymmS16, armnn::DataType::Signed32>( 0, + { 1, 2, 3, 4, 5, 6, 7, 8 }, + { 1 }); +} + +BOOST_FIXTURE_TEST_CASE(RankDimSize2QSymmS8, SimpleRankDimSize2Fixture) +{ + RunTest<1, armnn::DataType::QSymmS8, armnn::DataType::Signed32>( 0, + { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, + { 2 }); +} + +BOOST_FIXTURE_TEST_CASE(RankDimSize3Signed32, SimpleRankDimSize3Fixture) +{ + RunTest<1, armnn::DataType::Signed32, armnn::DataType::Signed32>( 0, + { 111, 85, 226, 3 }, + { 3 }); +} + +BOOST_FIXTURE_TEST_CASE(RankDimSize4Float32, SimpleRankDimSize4Fixture) +{ + RunTest<1, armnn::DataType::Float32, armnn::DataType::Signed32>( 0, + { 111, 85, 226, 3 }, + { 4 }); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file -- cgit v1.2.1