aboutsummaryrefslogtreecommitdiff
path: root/src/armnnDeserializer
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnnDeserializer')
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp               |  52
-rw-r--r--  src/armnnDeserializer/Deserializer.hpp               |   1
-rw-r--r--  src/armnnDeserializer/test/DeserializeReduceSum.cpp  | 126
3 files changed, 179 insertions(+), 0 deletions(-)
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 14c2af4fe1..e98ff15aa9 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -251,6 +251,7 @@ m_ParserFunctions(Layer_MAX+1, &IDeserializer::DeserializerImpl::ParseUnsupporte
m_ParserFunctions[Layer_QuantizeLayer] = &DeserializerImpl::ParseQuantize;
m_ParserFunctions[Layer_QuantizedLstmLayer] = &DeserializerImpl::ParseQuantizedLstm;
m_ParserFunctions[Layer_RankLayer] = &DeserializerImpl::ParseRank;
+ m_ParserFunctions[Layer_ReduceLayer] = &DeserializerImpl::ParseReduce;
m_ParserFunctions[Layer_ReshapeLayer] = &DeserializerImpl::ParseReshape;
m_ParserFunctions[Layer_ResizeBilinearLayer] = &DeserializerImpl::ParseResizeBilinear;
m_ParserFunctions[Layer_ResizeLayer] = &DeserializerImpl::ParseResize;
@@ -363,6 +364,8 @@ LayerBaseRawPtr IDeserializer::DeserializerImpl::GetBaseLayer(const GraphPtr& gr
return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base();
case Layer::Layer_RankLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_RankLayer()->base();
+ case Layer::Layer_ReduceLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_ReduceLayer()->base();
case Layer::Layer_ReshapeLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
case Layer::Layer_ResizeBilinearLayer:
@@ -498,6 +501,23 @@ armnn::ComparisonOperation ToComparisonOperation(armnnSerializer::ComparisonOper
}
}
+// Converts a flatbuffer-serialized ReduceOperation enum value into the
+// corresponding armnn::ReduceOperation, following the same shape as the
+// other To*Operation converters in this file (e.g. ToComparisonOperation).
+armnn::ReduceOperation ToReduceOperation(armnnSerializer::ReduceOperation operation)
+{
+ switch (operation)
+ {
+ case armnnSerializer::ReduceOperation::ReduceOperation_Sum:
+ return armnn::ReduceOperation::Sum;
+ case armnnSerializer::ReduceOperation::ReduceOperation_Max:
+ return armnn::ReduceOperation::Max;
+ case armnnSerializer::ReduceOperation::ReduceOperation_Mean:
+ return armnn::ReduceOperation::Mean;
+ case armnnSerializer::ReduceOperation::ReduceOperation_Min:
+ return armnn::ReduceOperation::Min;
+ default:
+ // NOTE(review): an unrecognised serialized value silently degrades to
+ // Sum instead of raising a ParseException — confirm this fallback is
+ // intentional rather than masking a schema-version mismatch.
+ return armnn::ReduceOperation::Sum;
+ }
+}
+
armnn::LogicalBinaryOperation ToLogicalBinaryOperation(armnnSerializer::LogicalBinaryOperation operation)
{
switch (operation)
@@ -2082,6 +2102,38 @@ void IDeserializer::DeserializerImpl::ParseRank(GraphPtr graph, unsigned int lay
RegisterOutputSlots(graph, layerIndex, layer);
}
+// Deserializes a flatbuffer ReduceLayer at graph position layerIndex:
+// reads the serialized ReduceDescriptor, rebuilds an armnn::ReduceDescriptor,
+// adds the layer to m_Network, and wires its single input/output slots.
+// Registered in m_ParserFunctions for Layer_ReduceLayer.
+void IDeserializer::DeserializerImpl::ParseReduce(GraphPtr graph, unsigned int layerIndex)
+{
+ CHECK_LAYERS(graph, 0, layerIndex);
+ CHECK_LOCATION();
+
+ // A Reduce layer has exactly one input and one output tensor.
+ auto inputs = GetInputs(graph, layerIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ auto outputs = GetOutputs(graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto fbLayer = graph->layers()->Get(layerIndex)->layer_as_ReduceLayer();
+ auto fbDescriptor = fbLayer->descriptor();
+ // flatbuffer Vector of axis indices; copied element-wise into m_vAxis below.
+ auto flatBufferAxis = fbDescriptor->axis();
+
+ armnn::ReduceDescriptor descriptor;
+ descriptor.m_TargetHeight = fbDescriptor->targetHeight();
+ descriptor.m_TargetWidth = fbDescriptor->targetWidth();
+ descriptor.m_KeepDims = fbDescriptor->keepDims();
+ descriptor.m_vAxis = std::vector<unsigned int>(flatBufferAxis->begin(), flatBufferAxis->end());
+ descriptor.m_ReduceOperation = ToReduceOperation(fbDescriptor->reduceOperation());
+
+ const std::string& layerName = GetLayerName(graph, layerIndex);
+ IConnectableLayer* layer = m_Network->AddReduceLayer(descriptor, layerName.c_str());
+
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // Connect this layer into the graph being rebuilt.
+ RegisterInputSlots(graph, layerIndex, layer);
+ RegisterOutputSlots(graph, layerIndex, layer);
+}
+
void IDeserializer::DeserializerImpl::ParseReshape(GraphPtr graph, unsigned int layerIndex)
{
CHECK_LAYERS(graph, 0, layerIndex);
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index e232feed9b..f4f64240a0 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -119,6 +119,7 @@ private:
void ParseQLstm(GraphPtr graph, unsigned int layerIndex);
void ParseQuantize(GraphPtr graph, unsigned int layerIndex);
void ParseRank(GraphPtr graph, unsigned int layerIndex);
+ void ParseReduce(GraphPtr graph, unsigned int layerIndex);
void ParseReshape(GraphPtr graph, unsigned int layerIndex);
void ParseResize(GraphPtr graph, unsigned int layerIndex);
void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/test/DeserializeReduceSum.cpp b/src/armnnDeserializer/test/DeserializeReduceSum.cpp
new file mode 100644
index 0000000000..d88613e593
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializeReduceSum.cpp
@@ -0,0 +1,126 @@
+//
+// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+// Test fixture that builds a minimal serialized network as a flatbuffer JSON
+// string — Input -> Reduce(Sum, keepDims) -> Output — parameterized on input
+// shape, output shape, reduction axis, and tensor data type, then deserializes
+// it via Setup() from the base fixture.
+struct ReduceSumFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit ReduceSumFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& axis,
+ const std::string& dataType)
+ {
+ // The shape/axis/dataType strings are spliced verbatim into the JSON,
+ // so callers must pass JSON-formatted values (e.g. "[ 1, 1, 3, 2 ]").
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+ }
+ }]
+ }
+ }
+ }
+ },
+ {
+ layer_type: "ReduceLayer",
+ layer: {
+ base: {
+ index: 1,
+ layerName: "ReduceSumLayer",
+ layerType: "Reduce",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ }
+ }]
+ },
+ descriptor: {
+ axis: )" + axis + R"(,
+ keepDims: true,
+ reduceOperation: Sum
+ }
+ }
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 2,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }
+ }
+ },
+ }
+ ]
+ }
+ )";
+ // Parses m_JsonString and runs the deserializer (provided by the base fixture).
+ Setup();
+ }
+};
+
+// Concrete fixture: Float32 ReduceSum over axis 2 of a 1x1x3x2 tensor with
+// keepDims, producing a 1x1x1x2 output.
+struct SimpleReduceSumFixture : ReduceSumFixture
+{
+ SimpleReduceSumFixture()
+ : ReduceSumFixture("[ 1, 1, 3, 2 ]", // inputShape
+ "[ 1, 1, 1, 2 ]", // outputShape
+ "[ 2 ]", // axis
+ "Float32") // dataType
+ {}
+};
+
+// Summing axis 2 of [[1,1],[2,2],[3,3]] collapses the three rows:
+// 1+2+3 = 6 per channel, hence the expected output { 6, 6 }.
+BOOST_FIXTURE_TEST_CASE(SimpleReduceSum, SimpleReduceSumFixture)
+{
+ RunTest<4, armnn::DataType::Float32>(
+ 0,
+ {{"InputLayer", { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f }}},
+ {{"OutputLayer", { 6.0f, 6.0f }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()