From 0c3ea5b8ac5ad8ca516930a0491afb1d1074e45b Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Wed, 3 Feb 2021 09:29:30 +0000
Subject: backends/reference: Add ReduceSum operation support

This patch adds ReduceSum operation support to the reference backend.
ReduceSum computes the sum of elements across dimensions of a tensor.

Changelog v1:
- Fix file header descriptions.

Changelog v2:
- Fix line limit issue.
- Fix type conversion issue.

Changelog v3:
- Remove tabs.
- Modify newly added file headers.

Changelog v4:
- The symbol isn't allowed in the header, so drop it from the newly added file headers.

Changelog v5:
- Remove tabs, fix the use of brackets and align lines correctly.

Changelog v6:
- Add serializer and deserializer support.

Changelog v7:
- Fix build error: add missed code.

Changelog v8:
- Rename ReduceSumDescriptor to ReduceDescriptor
- Update the m_KeepDims field data type to bool on ReduceDescriptor
- Add a ReduceOperation field to ReduceDescriptor
- Rename ReduceSumLayer to ReduceLayer
- Update ReduceLayer to use ReduceDescriptor
- Update the ReduceLayer::ValidateTensorShapesFromInputs() function
- Rename RefReduceSumWorkload to RefReduceWorkload
- Update the workload to use ReduceDescriptor
- Update the workload to use Decoders and Encoders
- Remove ReduceSum.hpp and ReduceSum.cpp
- Add Reduce.hpp and Reduce.cpp
- Move the Mean.cpp functionality (which implements REDUCE_MEAN) to Reduce.cpp
- Update RefMeanWorkload to call the Reduce function with the ReduceOperation::Mean argument
- Remove Mean.hpp and Mean.cpp
- Update the Serializer/Deserializer ArmnnSchema.fbs for ReduceLayer, ReduceDescriptor, and ReduceOperation
- Update the Serializer and Deserializer for serializing/parsing ReduceLayer
- Add a TfLite parser Sum test for the REDUCE_SUM operator
- Make corresponding changes to the front end and Ref backend to support the REDUCE_SUM operator

Changelog v9:
- Fixed build errors.
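Example front-end usage (illustrative only, not taken from this change): a minimal sketch of
building a network with the new ReduceDescriptor and AddReduceLayer, assuming the standard
armnn::INetwork builder API; the shapes, binding ids and layer names below are placeholders.

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    armnn::INetworkPtr BuildReduceSumNetwork()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // REDUCE_SUM over the last axis of a [1, 2, 2, 3] input, dropping the reduced dim.
        ReduceDescriptor descriptor;
        descriptor.m_vAxis           = { 3 };                // axes to reduce over
        descriptor.m_KeepDims        = false;                // drop the reduced dimension
        descriptor.m_ReduceOperation = ReduceOperation::Sum; // Sum / Max / Mean / Min

        IConnectableLayer* input  = network->AddInputLayer(0, "input");
        IConnectableLayer* reduce = network->AddReduceLayer(descriptor, "reduce_sum");
        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(reduce->GetInputSlot(0));
        reduce->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 2, 2, 3 }), DataType::Float32));
        reduce->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 2, 2 }), DataType::Float32));

        return network;
    }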
Change-Id: I8c8e034f3df73f9565b3c18eff51ecca6c542195
Signed-off-by: Inki Dae
Signed-off-by: Sadik Armagan
---
 src/armnnDeserializer/Deserializer.cpp | 52 ++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

(limited to 'src/armnnDeserializer/Deserializer.cpp')

diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 14c2af4fe1..e98ff15aa9 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -251,6 +251,7 @@ m_ParserFunctions(Layer_MAX+1, &IDeserializer::DeserializerImpl::ParseUnsupporte
     m_ParserFunctions[Layer_QuantizeLayer] = &DeserializerImpl::ParseQuantize;
     m_ParserFunctions[Layer_QuantizedLstmLayer] = &DeserializerImpl::ParseQuantizedLstm;
     m_ParserFunctions[Layer_RankLayer] = &DeserializerImpl::ParseRank;
+    m_ParserFunctions[Layer_ReduceLayer] = &DeserializerImpl::ParseReduce;
     m_ParserFunctions[Layer_ReshapeLayer] = &DeserializerImpl::ParseReshape;
     m_ParserFunctions[Layer_ResizeBilinearLayer] = &DeserializerImpl::ParseResizeBilinear;
     m_ParserFunctions[Layer_ResizeLayer] = &DeserializerImpl::ParseResize;
@@ -363,6 +364,8 @@ LayerBaseRawPtr IDeserializer::DeserializerImpl::GetBaseLayer(const GraphPtr& gr
             return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base();
         case Layer::Layer_RankLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_RankLayer()->base();
+        case Layer::Layer_ReduceLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_ReduceLayer()->base();
         case Layer::Layer_ReshapeLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
         case Layer::Layer_ResizeBilinearLayer:
@@ -498,6 +501,23 @@ armnn::ComparisonOperation ToComparisonOperation(armnnSerializer::ComparisonOper
     }
 }
+armnn::ReduceOperation ToReduceOperation(armnnSerializer::ReduceOperation operation)
+{
+    switch (operation)
+    {
+        case armnnSerializer::ReduceOperation::ReduceOperation_Sum:
+            return armnn::ReduceOperation::Sum;
+        case armnnSerializer::ReduceOperation::ReduceOperation_Max:
+            return armnn::ReduceOperation::Max;
+        case armnnSerializer::ReduceOperation::ReduceOperation_Mean:
+            return armnn::ReduceOperation::Mean;
+        case armnnSerializer::ReduceOperation::ReduceOperation_Min:
+            return armnn::ReduceOperation::Min;
+        default:
+            return armnn::ReduceOperation::Sum;
+    }
+}
+
 armnn::LogicalBinaryOperation ToLogicalBinaryOperation(armnnSerializer::LogicalBinaryOperation operation)
 {
     switch (operation)
     {
@@ -2082,6 +2102,38 @@ void IDeserializer::DeserializerImpl::ParseRank(GraphPtr graph, unsigned int lay
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void IDeserializer::DeserializerImpl::ParseReduce(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+    CHECK_LOCATION();
+
+    auto inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_ReduceLayer();
+    auto fbDescriptor = fbLayer->descriptor();
+    auto flatBufferAxis = fbDescriptor->axis();
+
+    armnn::ReduceDescriptor descriptor;
+    descriptor.m_TargetHeight = fbDescriptor->targetHeight();
+    descriptor.m_TargetWidth = fbDescriptor->targetWidth();
+    descriptor.m_KeepDims = fbDescriptor->keepDims();
+    descriptor.m_vAxis = std::vector<uint32_t>(flatBufferAxis->begin(), flatBufferAxis->end());
+    descriptor.m_ReduceOperation = ToReduceOperation(fbDescriptor->reduceOperation());
+
+    const std::string& layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddReduceLayer(descriptor, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void IDeserializer::DeserializerImpl::ParseReshape(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
--
cgit v1.2.1
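
Note (illustrative only, not taken from Reduce.cpp): the descriptor fields parsed above
(m_vAxis, m_KeepDims, m_ReduceOperation) ultimately request a reduction whose core indexing
can be sketched for a single axis of a row-major tensor as below; the real reference backend
works through Decoders/Encoders and also handles Max, Mean and Min.

    // Standalone sketch of a single-axis reduce-sum over a row-major tensor.
    #include <cstddef>
    #include <vector>

    std::vector<float> ReduceSumAxis(const std::vector<float>& input,
                                     const std::vector<std::size_t>& shape,
                                     std::size_t axis)
    {
        // Split the shape into the dimensions before and after the reduced axis.
        std::size_t outer = 1;
        std::size_t inner = 1;
        for (std::size_t d = 0; d < axis; ++d)                { outer *= shape[d]; }
        for (std::size_t d = axis + 1; d < shape.size(); ++d) { inner *= shape[d]; }
        const std::size_t axisSize = shape[axis];

        // Output shape is 'shape' with 'axis' removed (the m_KeepDims == false case).
        std::vector<float> output(outer * inner, 0.0f);
        for (std::size_t o = 0; o < outer; ++o)
        {
            for (std::size_t a = 0; a < axisSize; ++a)
            {
                for (std::size_t i = 0; i < inner; ++i)
                {
                    output[o * inner + i] += input[(o * axisSize + a) * inner + i];
                }
            }
        }
        return output;
    }

For example, with shape {2, 3}, axis 1 and input {1, 2, 3, 4, 5, 6}, the result is {6, 15}.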