diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2021-02-03 09:29:30 +0000 |
---|---|---|
committer | Sadik Armagan <sadik.armagan@arm.com> | 2021-02-03 09:29:47 +0000 |
commit | 0c3ea5b8ac5ad8ca516930a0491afb1d1074e45b (patch) | |
tree | 47ff1e9c1c70a3b134c1e9063dada66d70a7c963 /src/armnnDeserializer | |
parent | 84f41eb74765bd93307f3c6b334354c486dc746d (diff) | |
download | armnn-0c3ea5b8ac5ad8ca516930a0491afb1d1074e45b.tar.gz |
backends/reference: Add ReduceSum operation support
This patch adds ReduceSum operation support for the reference backend,
which computes the sum of elements across dimensions of a tensor.
Changelog v1:
- Fix file header descriptions.
Changelog v2:
- Fix line limit issue.
- Fix type conversion issue.
Changelog v3:
- Remove tabs.
- Modify newly added file headers.
Changelog v4:
- A symbol in the header isn't allowed, so drop it from the newly added file headers.
Changelog v5:
- Remove tabs, fix the use of brackets and align lines correctly.
Changelog v6:
- Add serializer and deserializer support.
Changelog v7:
- Fix build error; add missed code.
Changelog v8:
- Rename ReduceSumDescriptor to ReduceDescriptor
- Update m_KeepDims field data type to bool on ReduceDescriptor
- Add ReduceOperation field to ReduceDescriptor
- Rename ReduceSumLayer to ReduceLayer
- Update ReduceLayer to use ReduceDescriptor
- Update ReduceLayer::ValidateTensorShapesFromInputs() function
- Rename RefReduceSumWorkload to RefReduceWorkload
- Update workload to use ReduceDescriptor
- Update workload to use Decoders and Encoders
- Remove ReduceSum.hpp and ReduceSum.cpp
- Added Reduce.hpp and Reduce.cpp
- Move Mean.cpp (which is implementing REDUCE_MEAN) functionality to Reduce.cpp
- Update RefMeanWorkload to call Reduce function with ReduceOperation::Mean argument
- Remove Mean.hpp and Mean.cpp
- Update the Serializer/Deserializer ArmnnSchema.fbs for ReduceLayer, ReduceDescriptor, and ReduceOperation
- Update Serializer and Deserializer for serializing/parsing ReduceLayer
- Added TfLite parser Sum test for REDUCE_SUM operator
- Make corresponding changes on front-end and Ref backend to support REDUCE_SUM operator
Changelog v9:
- Fixed build errors.
Change-Id: I8c8e034f3df73f9565b3c18eff51ecca6c542195
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Diffstat (limited to 'src/armnnDeserializer')
-rw-r--r-- | src/armnnDeserializer/Deserializer.cpp | 52 | ||||
-rw-r--r-- | src/armnnDeserializer/Deserializer.hpp | 1 | ||||
-rw-r--r-- | src/armnnDeserializer/test/DeserializeReduceSum.cpp | 126 |
3 files changed, 179 insertions, 0 deletions
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 14c2af4fe1..e98ff15aa9 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -251,6 +251,7 @@ m_ParserFunctions(Layer_MAX+1, &IDeserializer::DeserializerImpl::ParseUnsupporte m_ParserFunctions[Layer_QuantizeLayer] = &DeserializerImpl::ParseQuantize; m_ParserFunctions[Layer_QuantizedLstmLayer] = &DeserializerImpl::ParseQuantizedLstm; m_ParserFunctions[Layer_RankLayer] = &DeserializerImpl::ParseRank; + m_ParserFunctions[Layer_ReduceLayer] = &DeserializerImpl::ParseReduce; m_ParserFunctions[Layer_ReshapeLayer] = &DeserializerImpl::ParseReshape; m_ParserFunctions[Layer_ResizeBilinearLayer] = &DeserializerImpl::ParseResizeBilinear; m_ParserFunctions[Layer_ResizeLayer] = &DeserializerImpl::ParseResize; @@ -363,6 +364,8 @@ LayerBaseRawPtr IDeserializer::DeserializerImpl::GetBaseLayer(const GraphPtr& gr return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base(); case Layer::Layer_RankLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_RankLayer()->base(); + case Layer::Layer_ReduceLayer: + return graphPtr->layers()->Get(layerIndex)->layer_as_ReduceLayer()->base(); case Layer::Layer_ReshapeLayer: return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base(); case Layer::Layer_ResizeBilinearLayer: @@ -498,6 +501,23 @@ armnn::ComparisonOperation ToComparisonOperation(armnnSerializer::ComparisonOper } } +armnn::ReduceOperation ToReduceOperation(armnnSerializer::ReduceOperation operation) +{ + switch (operation) + { + case armnnSerializer::ReduceOperation::ReduceOperation_Sum: + return armnn::ReduceOperation::Sum; + case armnnSerializer::ReduceOperation::ReduceOperation_Max: + return armnn::ReduceOperation::Max; + case armnnSerializer::ReduceOperation::ReduceOperation_Mean: + return armnn::ReduceOperation::Mean; + case armnnSerializer::ReduceOperation::ReduceOperation_Min: + return 
armnn::ReduceOperation::Min; + default: + return armnn::ReduceOperation::Sum; + } +} + armnn::LogicalBinaryOperation ToLogicalBinaryOperation(armnnSerializer::LogicalBinaryOperation operation) { switch (operation) @@ -2082,6 +2102,38 @@ void IDeserializer::DeserializerImpl::ParseRank(GraphPtr graph, unsigned int lay RegisterOutputSlots(graph, layerIndex, layer); } +void IDeserializer::DeserializerImpl::ParseReduce(GraphPtr graph, unsigned int layerIndex) +{ + CHECK_LAYERS(graph, 0, layerIndex); + CHECK_LOCATION(); + + auto inputs = GetInputs(graph, layerIndex); + CHECK_VALID_SIZE(inputs.size(), 1); + + auto outputs = GetOutputs(graph, layerIndex); + CHECK_VALID_SIZE(outputs.size(), 1); + + auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_ReduceLayer(); + auto fbDescriptor = fbLayer->descriptor(); + auto flatBufferAxis = fbDescriptor->axis(); + + armnn::ReduceDescriptor descriptor; + descriptor.m_TargetHeight = fbDescriptor->targetHeight(); + descriptor.m_TargetWidth = fbDescriptor->targetWidth(); + descriptor.m_KeepDims = fbDescriptor->keepDims(); + descriptor.m_vAxis = std::vector<unsigned int>(flatBufferAxis->begin(), flatBufferAxis->end()); + descriptor.m_ReduceOperation = ToReduceOperation(fbDescriptor->reduceOperation()); + + const std::string& layerName = GetLayerName(graph, layerIndex); + IConnectableLayer* layer = m_Network->AddReduceLayer(descriptor, layerName.c_str()); + + armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + RegisterInputSlots(graph, layerIndex, layer); + RegisterOutputSlots(graph, layerIndex, layer); +} + void IDeserializer::DeserializerImpl::ParseReshape(GraphPtr graph, unsigned int layerIndex) { CHECK_LAYERS(graph, 0, layerIndex); diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp index e232feed9b..f4f64240a0 100644 --- a/src/armnnDeserializer/Deserializer.hpp +++ b/src/armnnDeserializer/Deserializer.hpp @@ -119,6 
+119,7 @@ private: void ParseQLstm(GraphPtr graph, unsigned int layerIndex); void ParseQuantize(GraphPtr graph, unsigned int layerIndex); void ParseRank(GraphPtr graph, unsigned int layerIndex); + void ParseReduce(GraphPtr graph, unsigned int layerIndex); void ParseReshape(GraphPtr graph, unsigned int layerIndex); void ParseResize(GraphPtr graph, unsigned int layerIndex); void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex); diff --git a/src/armnnDeserializer/test/DeserializeReduceSum.cpp b/src/armnnDeserializer/test/DeserializeReduceSum.cpp new file mode 100644 index 0000000000..d88613e593 --- /dev/null +++ b/src/armnnDeserializer/test/DeserializeReduceSum.cpp @@ -0,0 +1,126 @@ +// +// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include <boost/test/unit_test.hpp> +#include "ParserFlatbuffersSerializeFixture.hpp" +#include "../Deserializer.hpp" + +#include <string> +#include <iostream> + +BOOST_AUTO_TEST_SUITE(Deserializer) + +struct ReduceSumFixture : public ParserFlatbuffersSerializeFixture +{ + explicit ReduceSumFixture(const std::string& inputShape, + const std::string& outputShape, + const std::string& axis, + const std::string& dataType) + { + m_JsonString = R"( + { + inputIds: [0], + outputIds: [2], + layers: [ + { + layer_type: "InputLayer", + layer: { + base: { + layerBindingId: 0, + base: { + index: 0, + layerName: "InputLayer", + layerType: "Input", + inputSlots: [{ + index: 0, + connection: {sourceLayerIndex:0, outputSlotIndex:0 }, + }], + outputSlots: [{ + index: 0, + tensorInfo: { + dimensions: )" + inputShape + R"(, + dataType: )" + dataType + R"( + } + }] + } + } + } + }, + { + layer_type: "ReduceLayer", + layer: { + base: { + index: 1, + layerName: "ReduceSumLayer", + layerType: "Reduce", + inputSlots: [{ + index: 0, + connection: {sourceLayerIndex:0, outputSlotIndex:0 }, + }], + outputSlots: [{ + index: 0, + tensorInfo: { + dimensions: )" + outputShape + 
R"(, + dataType: )" + dataType + R"( + } + }] + }, + descriptor: { + axis: )" + axis + R"(, + keepDims: true, + reduceOperation: Sum + } + } + }, + { + layer_type: "OutputLayer", + layer: { + base:{ + layerBindingId: 2, + base: { + index: 2, + layerName: "OutputLayer", + layerType: "Output", + inputSlots: [{ + index: 0, + connection: {sourceLayerIndex:1, outputSlotIndex:0 }, + }], + outputSlots: [{ + index: 0, + tensorInfo: { + dimensions: )" + outputShape + R"(, + dataType: )" + dataType + R"( + }, + }], + } + } + }, + } + ] + } + )"; + Setup(); + } +}; + +struct SimpleReduceSumFixture : ReduceSumFixture +{ + SimpleReduceSumFixture() + : ReduceSumFixture("[ 1, 1, 3, 2 ]", // inputShape + "[ 1, 1, 1, 2 ]", // outputShape + "[ 2 ]", // axis + "Float32") // dataType + {} +}; + +BOOST_FIXTURE_TEST_CASE(SimpleReduceSum, SimpleReduceSumFixture) +{ + RunTest<4, armnn::DataType::Float32>( + 0, + {{"InputLayer", { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f }}}, + {{"OutputLayer", { 6.0f, 6.0f }}}); +} + +BOOST_AUTO_TEST_SUITE_END() |