Diffstat (limited to 'src/armnnDeserializer')
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp          |  41
-rw-r--r--  src/armnnDeserializer/Deserializer.hpp          |   1
-rw-r--r--  src/armnnDeserializer/DeserializerSupport.md    |   3
-rw-r--r--  src/armnnDeserializer/test/DeserializePad.cpp   | 129
4 files changed, 173 insertions, 1 deletions
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index c7049f6a15..b8a1eaa84a 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -198,6 +198,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
m_ParserFunctions[Layer_MaximumLayer] = &Deserializer::ParseMaximum;
m_ParserFunctions[Layer_MultiplicationLayer] = &Deserializer::ParseMultiplication;
m_ParserFunctions[Layer_NormalizationLayer] = &Deserializer::ParseNormalization;
+ m_ParserFunctions[Layer_PadLayer] = &Deserializer::ParsePad;
m_ParserFunctions[Layer_PermuteLayer] = &Deserializer::ParsePermute;
m_ParserFunctions[Layer_Pooling2dLayer] = &Deserializer::ParsePooling2d;
m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
@@ -241,6 +242,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
return graphPtr->layers()->Get(layerIndex)->layer_as_NormalizationLayer()->base();
case Layer::Layer_OutputLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
+ case Layer::Layer_PadLayer:
+ return graphPtr->layers()->Get(layerIndex)->layer_as_PadLayer()->base();
case Layer::Layer_PermuteLayer:
return graphPtr->layers()->Get(layerIndex)->layer_as_PermuteLayer()->base();
case Layer::Layer_Pooling2dLayer:
@@ -1090,6 +1093,44 @@ void Deserializer::ParseFullyConnected(GraphPtr graph, unsigned int layerIndex)
RegisterOutputSlots(graph, layerIndex, layer);
}
+void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex)
+{
+ CHECK_LAYERS(graph, 0, layerIndex);
+
+ Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+ CHECK_VALID_SIZE(inputs.size(), 1);
+
+ Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+ CHECK_VALID_SIZE(outputs.size(), 1);
+
+ auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_PadLayer()->descriptor();
+ auto flatBufferPadList = flatBufferDescriptor->padList();
+
+ if (flatBufferPadList->Length() % 2 != 0)
+ {
+ throw ParseException(boost::str(
+ boost::format("The size of the pad list must be divisible by 2 %1%") % CHECK_LOCATION().AsString()));
+ }
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.reserve(flatBufferPadList->Length() / 2);
+ for (unsigned int i = 0; i < flatBufferPadList->Length() - 1; i += 2)
+ {
+ padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1));
+ }
+
+ armnn::PadDescriptor descriptor(padList);
+
+ auto layerName = GetLayerName(graph, layerIndex);
+ IConnectableLayer* layer = m_Network->AddPadLayer(descriptor, layerName.c_str());
+
+ armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ RegisterInputSlots(graph, layerIndex, layer);
+ RegisterOutputSlots(graph, layerIndex, layer);
+}
+
void Deserializer::ParsePermute(GraphPtr graph, unsigned int layerIndex)
{
CHECK_LAYERS(graph, 0, layerIndex);
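For illustration outside the patch itself: ParsePad reads the serialized pad list as a flat array of 2 * rank values and folds it into per-dimension (before, after) pairs for armnn::PadDescriptor. Below is a minimal standalone sketch of that pairing and of how each output dimension grows by before + after; the ToPadPairs helper and the plain std::vector input are assumptions for the example, not part of the Arm NN API.

#include <cstdio>
#include <stdexcept>
#include <utility>
#include <vector>

// Hypothetical helper mirroring the pairing logic in Deserializer::ParsePad:
// a flat list {b0, a0, b1, a1, ...} becomes {(b0, a0), (b1, a1), ...}.
std::vector<std::pair<unsigned int, unsigned int>>
ToPadPairs(const std::vector<unsigned int>& flatPadList)
{
    if (flatPadList.size() % 2 != 0)
    {
        throw std::invalid_argument("The size of the pad list must be divisible by 2");
    }
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.reserve(flatPadList.size() / 2);
    for (std::size_t i = 0; i + 1 < flatPadList.size(); i += 2)
    {
        padList.emplace_back(flatPadList[i], flatPadList[i + 1]);
    }
    return padList;
}

int main()
{
    // Matches the SimplePadFixture in the test below:
    // input shape [2, 2, 2], padList [0, 1, 2, 1, 2, 2].
    std::vector<unsigned int> inputShape   = {2, 2, 2};
    std::vector<unsigned int> flatPadList  = {0, 1, 2, 1, 2, 2};

    auto padList = ToPadPairs(flatPadList);
    for (std::size_t d = 0; d < inputShape.size(); ++d)
    {
        // Each output dimension is input + padding before + padding after.
        unsigned int outDim = inputShape[d] + padList[d].first + padList[d].second;
        std::printf("dim %zu: %u -> %u\n", d, inputShape[d], outDim);
    }
    // Prints 2 -> 3, 2 -> 5, 2 -> 6, i.e. the fixture's output shape [3, 5, 6].
    return 0;
}

In the sketch the loop bound i + 1 < size() also sidesteps the unsigned wrap-around that size() - 1 would produce for an empty pad list.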
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index fba8b88044..ec10cb5817 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -84,6 +84,7 @@ private:
void ParseMaximum(GraphPtr graph, unsigned int layerIndex);
void ParseMultiplication(GraphPtr graph, unsigned int layerIndex);
void ParseNormalization(GraphPtr graph, unsigned int layerIndex);
+ void ParsePad(GraphPtr graph, unsigned int layerIndex);
void ParsePermute(GraphPtr graph, unsigned int layerIndex);
void ParsePooling2d(GraphPtr graph, unsigned int layerIndex);
void ParseReshape(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index cf8f6dea2d..1f479b94b8 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -19,10 +19,11 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
* Minimum
* Multiplication
* Normalization
+* Pad
* Permute
* Pooling2d
* Reshape
* Softmax
* SpaceToBatchNd
-More machine learning layers will be supported in future releases.
\ No newline at end of file
+More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializer/test/DeserializePad.cpp b/src/armnnDeserializer/test/DeserializePad.cpp
new file mode 100644
index 0000000000..b18710a381
--- /dev/null
+++ b/src/armnnDeserializer/test/DeserializePad.cpp
@@ -0,0 +1,129 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct PadFixture : public ParserFlatbuffersSerializeFixture
+{
+ explicit PadFixture(const std::string &inputShape,
+ const std::string &padList,
+ const std::string &outputShape,
+ const std::string &dataType)
+ {
+ m_JsonString = R"(
+ {
+ inputIds: [0],
+ outputIds: [2],
+ layers: [
+ {
+ layer_type: "InputLayer",
+ layer: {
+ base: {
+ layerBindingId: 0,
+ base: {
+ index: 0,
+ layerName: "InputLayer",
+ layerType: "Input",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + inputShape + R"(,
+ dataType: )" + dataType + R"(
+ }
+ }]
+ }
+ }
+ }
+ },
+ {
+ layer_type: "PadLayer",
+ layer: {
+ base: {
+ index: 1,
+ layerName: "PadLayer",
+ layerType: "Pad",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ }
+ }]
+ },
+ descriptor: {
+ padList: )" + padList + R"(,
+ }
+ }
+ },
+ {
+ layer_type: "OutputLayer",
+ layer: {
+ base:{
+ layerBindingId: 2,
+ base: {
+ index: 2,
+ layerName: "OutputLayer",
+ layerType: "Output",
+ inputSlots: [{
+ index: 0,
+ connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+ }],
+ outputSlots: [{
+ index: 0,
+ tensorInfo: {
+ dimensions: )" + outputShape + R"(,
+ dataType: )" + dataType + R"(
+ },
+ }],
+ }
+ }
+ },
+ }
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("InputLayer", "OutputLayer");
+ }
+};
+
+struct SimplePadFixture : PadFixture
+{
+ SimplePadFixture() : PadFixture("[ 2, 2, 2 ]",
+ "[ 0, 1, 2, 1, 2, 2 ]",
+ "[ 3, 5, 6 ]",
+ "QuantisedAsymm8") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SimplePadQuantisedAsymm8, SimplePadFixture)
+{
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ {
+ 0, 4, 2, 5, 6, 1, 5, 2
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 4, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+ 1, 0, 0, 0, 0, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
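As a cross-check on the expected data in SimplePadQuantisedAsymm8 (again a sketch, not part of the commit): the output shape [ 3, 5, 6 ] is the input shape [ 2, 2, 2 ] plus each (before, after) pair, and every input value is shifted by the per-dimension "before" padding, so for example input element [1, 0, 0] (value 6) lands at output [1, 2, 2], flat index 1 * 30 + 2 * 6 + 2 = 44. A few lines that rebuild the expected 90-element buffer:

#include <cstdio>
#include <vector>

int main()
{
    // Reproduces the SimplePadQuantisedAsymm8 expectation: input shape [2, 2, 2],
    // pad pairs {(0,1), (2,1), (2,2)}, output shape [3, 5, 6] (90 zero-initialised values).
    const unsigned int inShape[3]    = {2, 2, 2};
    const unsigned int padBefore[3]  = {0, 2, 2};
    const unsigned int outShape[3]   = {3, 5, 6};
    const unsigned char inputData[8] = {0, 4, 2, 5, 6, 1, 5, 2};

    std::vector<unsigned char> output(outShape[0] * outShape[1] * outShape[2], 0);

    // Copy each input element to its shifted position in the padded tensor.
    for (unsigned int d0 = 0; d0 < inShape[0]; ++d0)
    for (unsigned int d1 = 0; d1 < inShape[1]; ++d1)
    for (unsigned int d2 = 0; d2 < inShape[2]; ++d2)
    {
        unsigned int inIndex  = (d0 * inShape[1] + d1) * inShape[2] + d2;
        unsigned int outIndex = ((d0 + padBefore[0]) * outShape[1] + (d1 + padBefore[1])) * outShape[2]
                                + (d2 + padBefore[2]);
        output[outIndex] = inputData[inIndex];
    }

    // output[44] is 6, matching the expected data in the test case above.
    std::printf("output[44] = %u\n", output[44]);
    return 0;
}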